diff --git a/.gitignore b/.gitignore index 29e0284..d4c81ed 100644 --- a/.gitignore +++ b/.gitignore @@ -1,4 +1,6 @@ generated/ +sources/* +!sources/README.md config.yaml __pycache__/ *.pyc diff --git a/README.md b/README.md index 2934c3e..1b51e92 100644 --- a/README.md +++ b/README.md @@ -205,6 +205,20 @@ build.py # Build: .md → .yaml + .json + .prompt.md generated/ # Build output (gitignored) ``` +## Source Mirrors + +External source repositories used during integration are kept under `sources/`: + +```text +sources/ +├── Anthropic-Cybersecurity-Skills/ +├── paperclip-docs-main/ +└── temp-cyber-skills/ +``` + +- `build.py` prefers `personas/_shared` as canonical input. +- If `personas/_shared` is missing, `build.py` falls back to known mirrors under `sources/`. + ### Variant Types | Type | Purpose | Example | diff --git a/build.py b/build.py index efa910a..87a1eee 100755 --- a/build.py +++ b/build.py @@ -8,6 +8,7 @@ New users: copy config.example.yaml → config.yaml and customize. import json import re import sys +import unicodedata from pathlib import Path try: @@ -27,10 +28,48 @@ def load_config(root: Path) -> dict: example_path = root / "config.example.yaml" if example_path.exists(): - print("WARN: No config.yaml found. Using defaults. Copy config.example.yaml → config.yaml to customize.") + print( + "WARN: No config.yaml found. Using defaults. Copy config.example.yaml → config.yaml to customize." + ) return {} +def resolve_shared_dir(root: Path, personas_dir: Path) -> Path | None: + """Resolve canonical shared library path. + + Primary location is personas/_shared. If that is missing, fall back to + known source mirrors under sources/. 
+ """ + primary = personas_dir / "_shared" + if primary.exists(): + return primary + + sources_dir = root / "sources" + fallbacks = [ + sources_dir / "temp-cyber-skills" / "personas" / "_shared", + sources_dir / "paperclip-docs-main" / "_shared", + ] + for candidate in fallbacks: + if candidate.exists(): + return candidate + + return None + + +def discover_sources(root: Path) -> list[str]: + """List known source mirrors under root/sources.""" + sources_dir = root / "sources" + if not sources_dir.exists(): + return [] + + known = [ + "Anthropic-Cybersecurity-Skills", + "paperclip-docs-main", + "temp-cyber-skills", + ] + return [name for name in known if (sources_dir / name).exists()] + + def flatten_config(config: dict, prefix: str = "") -> dict: """Flatten nested config dict for template substitution. @@ -44,7 +83,9 @@ def flatten_config(config: dict, prefix: str = "") -> dict: elif isinstance(value, list): flat[full_key] = value flat[f"{full_key}.count"] = len(value) - flat[f"{full_key}.csv"] = ", ".join(str(v) for v in value if not isinstance(v, dict)) + flat[f"{full_key}.csv"] = ", ".join( + str(v) for v in value if not isinstance(v, dict) + ) else: flat[full_key] = value return flat @@ -52,6 +93,7 @@ def flatten_config(config: dict, prefix: str = "") -> dict: def inject_config(content: str, flat_config: dict) -> str: """Replace {{config.key}} placeholders with config values.""" + def replacer(match): key = match.group(1).strip() value = flat_config.get(key, match.group(0)) # keep original if not found @@ -66,6 +108,7 @@ def inject_config(content: str, flat_config: dict) -> str: def check_conditionals(content: str, flat_config: dict) -> str: """Process {{#if key}}...{{/if}} and {{#unless key}}...{{/unless}} blocks.""" + # Handle {{#if key}}content{{/if}} def if_replacer(match): key = match.group(1).strip() @@ -75,7 +118,9 @@ def check_conditionals(content: str, flat_config: dict) -> str: return body return "" - content = re.sub(r"\{\{#if 
(.+?)\}\}(.*?)\{\{/if\}\}", if_replacer, content, flags=re.DOTALL) + content = re.sub( + r"\{\{#if (.+?)\}\}(.*?)\{\{/if\}\}", if_replacer, content, flags=re.DOTALL + ) # Handle {{#unless key}}content{{/unless}} def unless_replacer(match): @@ -86,7 +131,12 @@ def check_conditionals(content: str, flat_config: dict) -> str: return body return "" - content = re.sub(r"\{\{#unless (.+?)\}\}(.*?)\{\{/unless\}\}", unless_replacer, content, flags=re.DOTALL) + content = re.sub( + r"\{\{#unless (.+?)\}\}(.*?)\{\{/unless\}\}", + unless_replacer, + content, + flags=re.DOTALL, + ) return content @@ -118,7 +168,9 @@ def parse_persona_md(filepath: Path, flat_config: dict) -> dict: if line.startswith("## "): if current_section: sections[current_section] = "\n".join(current_content).strip() - current_section = line[3:].strip().lower().replace(" ", "_").replace("&", "and") + current_section = ( + line[3:].strip().lower().replace(" ", "_").replace("&", "and") + ) current_content = [] else: current_content.append(line) @@ -133,7 +185,14 @@ def parse_persona_md(filepath: Path, flat_config: dict) -> dict: } -def build_persona(persona_dir: Path, output_dir: Path, flat_config: dict, config: dict, escalation_graph: dict = None, skills_index: dict = None): +def build_persona( + persona_dir: Path, + output_dir: Path, + flat_config: dict, + config: dict, + escalation_graph: dict = None, + skills_index: dict = None, +): """Build all variants for a persona directory.""" md_files = sorted(persona_dir.glob("*.md")) if not md_files: @@ -168,14 +227,27 @@ def build_persona(persona_dir: Path, output_dir: Path, flat_config: dict, config continue # Build output object - output = {**meta, **parsed["metadata"], "variant": variant, "sections": parsed["sections"]} + output = { + **meta, + **parsed["metadata"], + "variant": variant, + "sections": parsed["sections"], + } # Inject config metadata if config: output["_config"] = { "user": config.get("user", {}).get("name", "unknown"), - "tools": {k: v for k, v 
in config.get("infrastructure", {}).get("tools", {}).items() if v is True}, - "frameworks": {k: v for k, v in config.get("frameworks", {}).items() if v is True}, + "tools": { + k: v + for k, v in config.get("infrastructure", {}) + .get("tools", {}) + .items() + if v is True + }, + "frameworks": { + k: v for k, v in config.get("frameworks", {}).items() if v is True + }, "regional_focus": config.get("regional_focus", {}), } @@ -207,13 +279,17 @@ def build_persona(persona_dir: Path, output_dir: Path, flat_config: dict, config # Write YAML yaml_out = out_path / f"{variant}.yaml" yaml_out.write_text( - yaml.dump(output, allow_unicode=True, default_flow_style=False, sort_keys=False), + yaml.dump( + output, allow_unicode=True, default_flow_style=False, sort_keys=False + ), encoding="utf-8", ) # Write JSON json_out = out_path / f"{variant}.json" - json_out.write_text(json.dumps(output, ensure_ascii=False, indent=2), encoding="utf-8") + json_out.write_text( + json.dumps(output, ensure_ascii=False, indent=2), encoding="utf-8" + ) # Write plain system prompt (just the body, no config metadata) prompt_out = out_path / f"{variant}.prompt.md" @@ -227,37 +303,192 @@ def build_persona(persona_dir: Path, output_dir: Path, flat_config: dict, config DEFAULT_SKILL_PERSONA_MAP = { # Cybersecurity skills → personas - "pentest": ["neo"], "nmap-recon": ["neo", "vortex"], "security-scanner": ["neo", "phantom"], - "sql-injection-testing": ["neo", "phantom"], "stealth-browser": ["neo", "oracle"], - "security-audit-toolkit": ["neo", "forge"], "pwnclaw-security-scan": ["neo"], - "senior-secops": ["bastion"], "clawsec": ["neo", "vortex"], - "pcap-analyzer": ["vortex", "bastion"], "sys-guard-linux-remediator": ["bastion"], - "ctf-writeup-generator": ["neo"], "dns-networking": ["vortex", "architect"], - "network-scanner": ["neo", "vortex"], "security-skill-scanner": ["neo"], - "pentest-active-directory": ["neo"], "pentest-api-attacker": ["neo", "phantom"], - "pentest-auth-bypass": ["neo", 
"phantom"], "pentest-c2-operator": ["neo", "sentinel"], + "pentest": ["neo"], + "nmap-recon": ["neo", "vortex"], + "security-scanner": ["neo", "phantom"], + "sql-injection-testing": ["neo", "phantom"], + "stealth-browser": ["neo", "oracle"], + "security-audit-toolkit": ["neo", "forge"], + "pwnclaw-security-scan": ["neo"], + "senior-secops": ["bastion"], + "clawsec": ["neo", "vortex"], + "pcap-analyzer": ["vortex", "bastion"], + "sys-guard-linux-remediator": ["bastion"], + "ctf-writeup-generator": ["neo"], + "dns-networking": ["vortex", "architect"], + "network-scanner": ["neo", "vortex"], + "security-skill-scanner": ["neo"], + "pentest-active-directory": ["neo"], + "pentest-api-attacker": ["neo", "phantom"], + "pentest-auth-bypass": ["neo", "phantom"], + "pentest-c2-operator": ["neo", "sentinel"], "gov-cybersecurity": ["sentinel", "bastion"], # Intelligence skills → personas - "osint-investigator": ["oracle"], "seithar-intel": ["sentinel", "frodo"], - "freshrss": ["frodo", "oracle"], "freshrss-reader": ["frodo", "oracle"], - "war-intel-monitor": ["frodo", "marshal"], "news-crawler": ["frodo", "herald"], - "dellight-intelligence-ops": ["frodo", "echo"], "dellight-strategic-intelligence": ["frodo"], - "agent-intelligence-network-scan": ["oracle"], "social-trust-manipulation-detector": ["ghost"], + "osint-investigator": ["oracle"], + "seithar-intel": ["sentinel", "frodo"], + "freshrss": ["frodo", "oracle"], + "freshrss-reader": ["frodo", "oracle"], + "war-intel-monitor": ["frodo", "marshal"], + "news-crawler": ["frodo", "herald"], + "dellight-intelligence-ops": ["frodo", "echo"], + "dellight-strategic-intelligence": ["frodo"], + "agent-intelligence-network-scan": ["oracle"], + "social-trust-manipulation-detector": ["ghost"], # Infrastructure skills → personas - "docker-essentials": ["architect"], "session-logs": ["architect"], + "docker-essentials": ["architect"], + "session-logs": ["architect"], # Document processing → personas - "image-ocr": ["oracle", "scribe"], 
"mistral-ocr": ["oracle", "scribe"], - "pdf-text-extractor": ["scribe", "scholar"], "youtube-transcript": ["herald", "scholar"], + "image-ocr": ["oracle", "scribe"], + "mistral-ocr": ["oracle", "scribe"], + "pdf-text-extractor": ["scribe", "scholar"], + "youtube-transcript": ["herald", "scholar"], # Web scraping → personas - "deep-scraper": ["oracle"], "crawl-for-ai": ["oracle", "herald"], + "deep-scraper": ["oracle"], + "crawl-for-ai": ["oracle", "herald"], } +VALID_PERSONAS = { + "arbiter", + "architect", + "bastion", + "centurion", + "chronos", + "cipher", + "corsair", + "echo", + "forge", + "frodo", + "gambit", + "ghost", + "herald", + "ledger", + "marshal", + "medic", + "neo", + "oracle", + "phantom", + "polyglot", + "sage", + "scholar", + "scribe", + "sentinel", + "specter", + "tribune", + "vortex", + "warden", + "wraith", +} + + +def parse_skill_frontmatter(skill_md: Path) -> dict: + """Parse YAML frontmatter from SKILL.md; return empty dict if absent/invalid.""" + content = skill_md.read_text(encoding="utf-8") + fm_match = re.match(r"^---\n(.*?)\n---\n", content, re.DOTALL) + if not fm_match: + return {} + parsed = yaml.safe_load(fm_match.group(1)) + return parsed if isinstance(parsed, dict) else {} + + +def infer_personas_from_skill_metadata(skill_name: str, metadata: dict) -> list: + """Infer likely persona mappings using skill frontmatter metadata and naming.""" + name = (skill_name or "").lower() + domain = str(metadata.get("domain", "")).lower() + subdomain = str(metadata.get("subdomain", "")).lower() + description = str(metadata.get("description", "")).lower() + tags = [str(t).lower() for t in metadata.get("tags", []) if t is not None] + blob = " ".join([name, domain, subdomain, description] + tags) + + personas = set() + + # Subdomain affinity + subdomain_map = { + "penetration-testing": ["neo", "phantom", "vortex"], + "application-security": ["phantom", "neo"], + "api-security": ["phantom", "neo"], + "web-security": ["phantom", "neo"], + 
"malware-analysis": ["specter", "bastion", "sentinel"], + "memory-forensics": ["specter", "bastion"], + "forensics": ["specter", "bastion"], + "threat-intelligence": ["sentinel", "frodo", "oracle"], + "incident-response": ["bastion", "sentinel", "medic"], + "soc-operations": ["bastion", "sentinel"], + "threat-hunting": ["sentinel", "bastion", "vortex"], + "network-security": ["vortex", "bastion"], + "network-forensics": ["vortex", "specter", "bastion"], + "cloud-security": ["architect", "bastion", "sentinel"], + "identity-security": ["cipher", "neo", "bastion"], + "active-directory": ["cipher", "neo", "bastion"], + "vulnerability-management": ["bastion", "forge"], + "compliance": ["ledger", "arbiter", "bastion"], + "ot-security": ["centurion", "bastion", "sentinel"], + } + personas.update(subdomain_map.get(subdomain, [])) + + # Keyword affinity fallback + keyword_map = { + "apt": ["sentinel", "frodo"], + "threat intel": ["sentinel", "oracle", "frodo"], + "ioc": ["sentinel", "bastion"], + "misp": ["sentinel", "oracle"], + "siem": ["bastion", "sentinel"], + "splunk": ["bastion", "sentinel"], + "soc": ["bastion", "sentinel"], + "incident response": ["bastion", "medic", "sentinel"], + "phishing": ["bastion", "oracle", "sentinel"], + "malware": ["specter", "bastion", "sentinel"], + "ransomware": ["specter", "bastion", "sentinel"], + "forensic": ["specter", "bastion"], + "volatility": ["specter", "bastion"], + "yara": ["specter", "bastion"], + "memory": ["specter", "bastion"], + "network": ["vortex", "bastion"], + "zeek": ["vortex", "bastion", "sentinel"], + "wireshark": ["vortex", "bastion"], + "nmap": ["neo", "vortex"], + "pentest": ["neo", "phantom", "vortex"], + "red team": ["neo", "phantom", "specter"], + "web": ["phantom", "neo"], + "xss": ["phantom", "neo"], + "sql injection": ["phantom", "neo"], + "api": ["phantom", "neo"], + "kubernetes": ["architect", "bastion", "sentinel"], + "docker": ["architect", "bastion"], + "aws": ["architect", "bastion", "sentinel"], + 
"azure": ["architect", "bastion", "sentinel"], + "gcp": ["architect", "bastion", "sentinel"], + "iam": ["cipher", "architect", "bastion"], + "active directory": ["cipher", "neo", "bastion"], + "kerberos": ["cipher", "neo", "bastion"], + "compliance": ["ledger", "arbiter", "bastion"], + "nist": ["ledger", "bastion", "sentinel"], + "ot": ["centurion", "bastion", "sentinel"], + "scada": ["centurion", "bastion", "sentinel"], + "ics": ["centurion", "bastion", "sentinel"], + } + for keyword, mapped_personas in keyword_map.items(): + if keyword in blob: + personas.update(mapped_personas) + + # Conservative fallback for unmapped cybersecurity skills + if not personas and "cyber" in domain: + personas.update(["bastion"]) + + # Keep only valid personas and deterministic order + return sorted([p for p in personas if p in VALID_PERSONAS]) + + def load_skill_persona_map(config: dict) -> dict: """Load skill→persona mapping from config.yaml or use defaults.""" custom = config.get("skill_persona_map", {}) - merged = dict(DEFAULT_SKILL_PERSONA_MAP) - merged.update(custom) + merged = { + k: [p for p in v if p in VALID_PERSONAS] + for k, v in DEFAULT_SKILL_PERSONA_MAP.items() + } + for skill, personas in custom.items(): + if isinstance(personas, list): + merged[skill] = [p for p in personas if p in VALID_PERSONAS] return merged @@ -289,7 +520,9 @@ def search_skills(shared_dir: Path, query: str): desc = "" for line in content.split("\n"): line = line.strip() - if line and not line.startswith(("---", "#", "name:", "description:")): + if line and not line.startswith( + ("---", "#", "name:", "description:") + ): desc = line[:100] break results.append((score, name, skills_subdir, desc)) @@ -297,7 +530,7 @@ def search_skills(shared_dir: Path, query: str): results.sort(key=lambda x: -x[0]) print(f"\n Search: '{query}' — {len(results)} results\n") for i, (score, name, source, desc) in enumerate(results[:20]): - print(f" {i+1:2}. [{score:3}] {name} ({source})") + print(f" {i + 1:2}. 
[{score:3}] {name} ({source})") if desc: print(f" {desc}") if len(results) > 20: @@ -351,20 +584,26 @@ def run_tests(personas_dir: Path, target: str = None): # Check must_include keywords exist in persona definition for keyword in expect.get("must_include", []): if keyword.lower() not in prompt_content: - warnings.append(f" {persona_name}/{test_name}: '{keyword}' not in persona prompt") + warnings.append( + f" {persona_name}/{test_name}: '{keyword}' not in persona prompt" + ) test_passed = False # Check escalation targets are defined if expect.get("escalation"): target_persona = expect["escalation"].lower() if target_persona not in prompt_content: - warnings.append(f" {persona_name}/{test_name}: escalation to '{target_persona}' not defined in boundaries") + warnings.append( + f" {persona_name}/{test_name}: escalation to '{target_persona}' not defined in boundaries" + ) test_passed = False # Check confidence language for intel personas if expect.get("confidence"): if "confidence" not in prompt_content and "high" not in prompt_content: - warnings.append(f" {persona_name}/{test_name}: confidence levels not defined in persona") + warnings.append( + f" {persona_name}/{test_name}: confidence levels not defined in persona" + ) test_passed = False if test_passed: @@ -384,9 +623,16 @@ def run_tests(personas_dir: Path, target: str = None): def build_skills_index(shared_dir: Path, config: dict = None) -> dict: - """Index all shared skills from _shared/skills/ and _shared/paperclip-skills/.""" + """Index all shared skills from _shared/{skills,paperclip-skills,community-skills}/.""" skill_map = load_skill_persona_map(config or {}) - index = {"skills": {}, "paperclip_skills": {}, "design_brands": [], "ui_ux_styles": 0, "_skill_persona_map": skill_map} + index = { + "skills": {}, + "paperclip_skills": {}, + "community_skills": {}, + "design_brands": [], + "ui_ux_styles": 0, + "_skill_persona_map": skill_map, + } # Index shared-skills skills_dir = shared_dir / "skills" @@ -396,16 
+642,33 @@ def build_skills_index(shared_dir: Path, config: dict = None) -> dict: continue skill_md = skill_dir / "SKILL.md" if skill_md.exists(): + skill_meta = parse_skill_frontmatter(skill_md) + inferred_personas = infer_personas_from_skill_metadata( + skill_dir.name, skill_meta + ) + configured_personas = skill_map.get(skill_dir.name, []) + merged_personas = sorted( + set(configured_personas).union(inferred_personas) + ) content = skill_md.read_text(encoding="utf-8") first_line = "" for line in content.split("\n"): line = line.strip() - if line and not line.startswith(("---", "#", "name:", "description:")): + if line and not line.startswith( + ("---", "#", "name:", "description:") + ): first_line = line[:120] break index["skills"][skill_dir.name] = { - "personas": skill_map.get(skill_dir.name, []), + "personas": merged_personas, "summary": first_line, + "domain": str(skill_meta.get("domain", "")), + "subdomain": str(skill_meta.get("subdomain", "")), + "tags": skill_meta.get("tags", []), + "mapped_by": { + "explicit": configured_personas, + "inferred": inferred_personas, + }, "has_references": (skill_dir / "references").is_dir(), } @@ -419,10 +682,22 @@ def build_skills_index(shared_dir: Path, config: dict = None) -> dict: if skill_md.exists(): index["paperclip_skills"][skill_dir.name] = True + # Index community-skills + cskills_dir = shared_dir / "community-skills" + if cskills_dir.exists(): + for skill_dir in sorted(cskills_dir.iterdir()): + if not skill_dir.is_dir(): + continue + skill_md = skill_dir / "SKILL.md" + if skill_md.exists(): + index["community_skills"][skill_dir.name] = True + # Index design brands design_dir = shared_dir / "design-md" if design_dir.exists(): - index["design_brands"] = sorted([d.name for d in design_dir.iterdir() if d.is_dir()]) + index["design_brands"] = sorted( + [d.name for d in design_dir.iterdir() if d.is_dir()] + ) # Count UI/UX data uiux_dir = shared_dir / "ui-ux-pro-max" / "data" @@ -477,7 +752,9 @@ def 
validate_persona(persona_name: str, parsed: dict) -> list: if section not in parsed.get("sections", {}): warnings.append(f"Missing section: {section}") elif len(parsed["sections"][section].split()) < 30: - warnings.append(f"Thin section ({len(parsed['sections'][section].split())} words): {section}") + warnings.append( + f"Thin section ({len(parsed['sections'][section].split())} words): {section}" + ) fm = parsed.get("metadata", {}) for field in ["codename", "name", "domain", "address_to", "tone"]: @@ -487,7 +764,13 @@ def validate_persona(persona_name: str, parsed: dict) -> list: return warnings -def build_catalog(personas_dir: Path, output_dir: Path, config: dict, flat_config: dict): +def build_catalog( + personas_dir: Path, + output_dir: Path, + config: dict, + flat_config: dict, + shared_dir: Path | None, +): """Generate CATALOG.md with stats, escalation paths, and trigger index.""" addresses = config.get("persona_defaults", {}).get("custom_addresses", {}) @@ -515,7 +798,11 @@ def build_catalog(personas_dir: Path, output_dir: Path, config: dict, flat_confi meta = yaml.safe_load(meta_file.read_text(encoding="utf-8")) or {} codename = meta.get("codename", persona_dir.name) address = addresses.get(persona_dir.name, meta.get("address_to", "N/A")) - variants = [f.stem for f in sorted(persona_dir.glob("*.md")) if not f.name.startswith("_")] + variants = [ + f.stem + for f in sorted(persona_dir.glob("*.md")) + if not f.name.startswith("_") + ] # Parse general.md for stats general = persona_dir / "general.md" @@ -540,7 +827,9 @@ def build_catalog(personas_dir: Path, output_dir: Path, config: dict, flat_confi catalog_lines.append(f"- **Domain:** {meta.get('domain', 'N/A')}") catalog_lines.append(f"- **Hitap:** {address}") catalog_lines.append(f"- **Variants:** {', '.join(variants)}") - catalog_lines.append(f"- **Depth:** {word_count:,} words, {section_count} sections") + catalog_lines.append( + f"- **Depth:** {word_count:,} words, {section_count} sections" + ) if 
escalates_to: catalog_lines.append(f"- **Escalates to:** {', '.join(escalates_to)}") catalog_lines.append("") @@ -559,7 +848,9 @@ def build_catalog(personas_dir: Path, output_dir: Path, config: dict, flat_confi catalog_lines.append("## Build Statistics\n") catalog_lines.append(f"- Total prompt content: {total_words:,} words") catalog_lines.append(f"- Total sections: {total_sections}") - catalog_lines.append(f"- Escalation connections: {sum(len(v) for v in escalation_graph.values())}") + catalog_lines.append( + f"- Escalation connections: {sum(len(v) for v in escalation_graph.values())}" + ) catalog_lines.append(f"- Unique triggers: {len(trigger_index)}") catalog_lines.append("") @@ -580,13 +871,18 @@ def build_catalog(personas_dir: Path, output_dir: Path, config: dict, flat_confi print(f" Index: {index_path}/escalation_graph.json, trigger_index.json") # Write skills index if shared dir exists - shared_dir = personas_dir / "_shared" - if shared_dir.exists(): - si = build_skills_index(shared_dir) + if shared_dir and shared_dir.exists(): + si = build_skills_index(shared_dir, config) (index_path / "skills_index.json").write_text( json.dumps(si, indent=2, ensure_ascii=False), encoding="utf-8" ) - print(f" Skills: {len(si.get('skills', {}))} shared + {len(si.get('paperclip_skills', {}))} paperclip + {len(si.get('design_brands', []))} design brands + {si.get('ui_ux_styles', 0)} UI/UX data files") + print( + f" Skills: {len(si.get('skills', {}))} shared + " + f"{len(si.get('paperclip_skills', {}))} paperclip + " + f"{len(si.get('community_skills', {}))} community + " + f"{len(si.get('design_brands', []))} design brands + " + f"{si.get('ui_ux_styles', 0)} UI/UX data files" + ) # Print validation warnings if all_warnings: @@ -597,7 +893,9 @@ def build_catalog(personas_dir: Path, output_dir: Path, config: dict, flat_confi return total_words -def print_summary(config: dict, total_personas: int, total_variants: int, total_words: int = 0): +def print_summary( + config: dict, 
total_personas: int, total_variants: int, total_words: int = 0 +): """Print build summary with config status.""" print("\n" + "=" * 50) print(f"BUILD COMPLETE") @@ -609,8 +907,14 @@ def print_summary(config: dict, total_personas: int, total_variants: int, total_ if config: user = config.get("user", {}).get("name", "?") - tools_on = sum(1 for v in config.get("infrastructure", {}).get("tools", {}).values() if v is True) - frameworks_on = sum(1 for v in config.get("frameworks", {}).values() if v is True) + tools_on = sum( + 1 + for v in config.get("infrastructure", {}).get("tools", {}).values() + if v is True + ) + frameworks_on = sum( + 1 for v in config.get("frameworks", {}).values() if v is True + ) regions = config.get("regional_focus", {}).get("primary", []) print(f"\n Config: {user}") print(f" Tools: {tools_on} enabled") @@ -641,7 +945,11 @@ def install_claude(output_dir: Path): for prompt_file in persona_dir.glob("*.prompt.md"): variant = prompt_file.stem codename = persona_dir.name - cmd_name = f"persona-{codename}" if variant == "general" else f"persona-{codename}-{variant}" + cmd_name = ( + f"persona-{codename}" + if variant == "general" + else f"persona-{codename}-{variant}" + ) dest = commands_dir / f"{cmd_name}.md" content = prompt_file.read_text(encoding="utf-8") command_content = f"{content}\n\n---\nUser query: $ARGUMENTS\n" @@ -683,10 +991,24 @@ def install_claude(output_dir: Path): "name": codename, "description": f"{name} ({address_to}) — {role}. 
{domain}.", "instructions": instructions, - "allowedTools": ["Read(*)", "Edit(*)", "Write(*)", "Bash(*)", "Glob(*)", "Grep(*)", "WebFetch(*)", "WebSearch(*)"], + "allowedTools": [ + "Read(*)", + "Edit(*)", + "Write(*)", + "Bash(*)", + "Glob(*)", + "Grep(*)", + "WebFetch(*)", + "WebSearch(*)", + ], } agent_file = agents_dir / f"{codename}.yml" - agent_file.write_text(yaml.dump(agent, allow_unicode=True, default_flow_style=False, sort_keys=False), encoding="utf-8") + agent_file.write_text( + yaml.dump( + agent, allow_unicode=True, default_flow_style=False, sort_keys=False + ), + encoding="utf-8", + ) agent_count += 1 print(f" Claude: {cmd_count} commands + {agent_count} agents installed") @@ -730,10 +1052,13 @@ def install_gemini(output_dir: Path): gem = { "name": f"{name} — {variant}" if variant != "general" else name, "description": f"{data.get('role', '')} | {data.get('domain', '')}", - "system_instruction": data.get("sections", {}).get("soul", "") + "\n\n" + - data.get("sections", {}).get("expertise", "") + "\n\n" + - data.get("sections", {}).get("methodology", "") + "\n\n" + - data.get("sections", {}).get("behavior_rules", ""), + "system_instruction": data.get("sections", {}).get("soul", "") + + "\n\n" + + data.get("sections", {}).get("expertise", "") + + "\n\n" + + data.get("sections", {}).get("methodology", "") + + "\n\n" + + data.get("sections", {}).get("behavior_rules", ""), "metadata": { "codename": codename, "variant": variant, @@ -744,17 +1069,26 @@ def install_gemini(output_dir: Path): }, } dest = gems_dir / f"{codename}-{variant}.json" - dest.write_text(json.dumps(gem, ensure_ascii=False, indent=2), encoding="utf-8") + dest.write_text( + json.dumps(gem, ensure_ascii=False, indent=2), encoding="utf-8" + ) count += 1 print(f" Gemini: {count} gems generated to {gems_dir}") return count -def install_paperclip(output_dir: Path, personas_dir: Path): +def install_paperclip(output_dir: Path, personas_dir: Path, shared_dir: Path | None): """Install personas as 
Paperclip agents (SOUL.md + hermes-config.yaml + AGENTS.md per agent).""" pc_dir = output_dir / "_paperclip" agents_dir = pc_dir / "agents" skills_dir = pc_dir / "skills" + + # Recreate output for deterministic full migration. + if pc_dir.exists(): + import shutil + + shutil.rmtree(pc_dir) + agents_dir.mkdir(parents=True, exist_ok=True) skills_dir.mkdir(parents=True, exist_ok=True) @@ -860,11 +1194,13 @@ def install_paperclip(output_dir: Path, personas_dir: Path): agents_md_lines.append(f"- → {target}") agents_md_lines.append("") - (agent_dir / "AGENTS.md").write_text("\n".join(agents_md_lines), encoding="utf-8") + (agent_dir / "AGENTS.md").write_text( + "\n".join(agents_md_lines), encoding="utf-8" + ) agent_count += 1 # Copy shared skills as Paperclip skills (SKILL.md format already compatible) - shared_skills = personas_dir / "_shared" / "skills" + shared_skills = shared_dir / "skills" if shared_dir else Path("__missing__") if shared_skills.exists(): for skill_dir in sorted(shared_skills.iterdir()): if not skill_dir.is_dir(): @@ -873,15 +1209,18 @@ def install_paperclip(output_dir: Path, personas_dir: Path): if skill_md.exists(): dest = skills_dir / skill_dir.name dest.mkdir(parents=True, exist_ok=True) - (dest / "SKILL.md").write_text(skill_md.read_text(encoding="utf-8"), encoding="utf-8") + (dest / "SKILL.md").write_text( + skill_md.read_text(encoding="utf-8"), encoding="utf-8" + ) refs = skill_dir / "references" if refs.is_dir(): import shutil + shutil.copytree(refs, dest / "references", dirs_exist_ok=True) skill_count += 1 # Copy paperclip-specific skills - pc_skills = personas_dir / "_shared" / "paperclip-skills" + pc_skills = shared_dir / "paperclip-skills" if shared_dir else Path("__missing__") if pc_skills.exists(): for skill_dir in sorted(pc_skills.iterdir()): if not skill_dir.is_dir(): @@ -890,25 +1229,54 @@ def install_paperclip(output_dir: Path, personas_dir: Path): if skill_md.exists() and not (skills_dir / skill_dir.name).exists(): dest = skills_dir 
/ skill_dir.name dest.mkdir(parents=True, exist_ok=True) - (dest / "SKILL.md").write_text(skill_md.read_text(encoding="utf-8"), encoding="utf-8") + (dest / "SKILL.md").write_text( + skill_md.read_text(encoding="utf-8"), encoding="utf-8" + ) refs = skill_dir / "references" if refs.is_dir(): import shutil + shutil.copytree(refs, dest / "references", dirs_exist_ok=True) scripts = skill_dir / "scripts" if scripts.is_dir(): import shutil + shutil.copytree(scripts, dest / "scripts", dirs_exist_ok=True) skill_count += 1 # Deploy original Paperclip company agents from _shared/paperclip-agents/ - pc_agents_src = personas_dir / "_shared" / "paperclip-agents" + pc_agents_src = ( + shared_dir / "paperclip-agents" if shared_dir else Path("__missing__") + ) pc_agent_count = 0 + + def normalize_agent_name(name: str) -> str: + """Normalize escaped/unicode-heavy names to stable ASCII directory names.""" + decoded = re.sub( + r"#U([0-9A-Fa-f]{4})", + lambda m: chr(int(m.group(1), 16)), + name, + ) + ascii_name = ( + unicodedata.normalize("NFKD", decoded) + .encode("ascii", "ignore") + .decode("ascii") + ) + # Keep names filesystem-safe and deterministic. 
+ slug = re.sub(r"[^a-zA-Z0-9]+", "-", ascii_name).strip("-").lower() + return slug or decoded + if pc_agents_src.exists(): + seen_company_agents = set() + collision_count = 0 for agent_src in sorted(pc_agents_src.iterdir()): if not agent_src.is_dir(): continue - agent_name = agent_src.name + agent_name = normalize_agent_name(agent_src.name) + if agent_name in seen_company_agents: + collision_count += 1 + continue + seen_company_agents.add(agent_name) # Skip if persona-based agent already exists with same name if (agents_dir / agent_name).exists(): continue @@ -916,11 +1284,19 @@ def install_paperclip(output_dir: Path, personas_dir: Path): dest.mkdir(parents=True, exist_ok=True) for f in agent_src.iterdir(): if f.is_file(): - (dest / f.name).write_text(f.read_text(encoding="utf-8"), encoding="utf-8") + (dest / f.name).write_text( + f.read_text(encoding="utf-8"), encoding="utf-8" + ) pc_agent_count += 1 + if collision_count: + print( + f" Note: skipped {collision_count} duplicate company agent source dirs after name normalization" + ) total_agents = agent_count + pc_agent_count - print(f" Paperclip: {agent_count} persona agents + {pc_agent_count} company agents + {skill_count} skills to {pc_dir}") + print( + f" Paperclip: {agent_count} persona agents + {pc_agent_count} company agents + {skill_count} skills to {pc_dir}" + ) return total_agents @@ -955,13 +1331,28 @@ def install_openclaw(output_dir: Path): def main(): import argparse - parser = argparse.ArgumentParser(description="Build persona library and optionally install to platforms.") - parser.add_argument("--install", choices=["claude", "antigravity", "gemini", "openclaw", "paperclip", "all"], - help="Install generated personas to a target platform") - parser.add_argument("--search", type=str, metavar="QUERY", - help="Search across all shared skills (e.g. 
--search 'pentest AD')") - parser.add_argument("--test", nargs="?", const="__all__", metavar="PERSONA", - help="Run persona test suite (optionally specify persona name)") + + parser = argparse.ArgumentParser( + description="Build persona library and optionally install to platforms." + ) + parser.add_argument( + "--install", + choices=["claude", "antigravity", "gemini", "openclaw", "paperclip", "all"], + help="Install generated personas to a target platform", + ) + parser.add_argument( + "--search", + type=str, + metavar="QUERY", + help="Search across all shared skills (e.g. --search 'pentest AD')", + ) + parser.add_argument( + "--test", + nargs="?", + const="__all__", + metavar="PERSONA", + help="Run persona test suite (optionally specify persona name)", + ) args = parser.parse_args() root = Path(__file__).parent @@ -979,17 +1370,28 @@ def main(): # Find all persona directories persona_dirs = [ - d for d in sorted(personas_dir.iterdir()) if d.is_dir() and not d.name.startswith((".", "_")) + d + for d in sorted(personas_dir.iterdir()) + if d.is_dir() and not d.name.startswith((".", "_")) ] if not persona_dirs: print("No persona directories found.") sys.exit(1) - shared_dir = personas_dir / "_shared" + shared_dir = resolve_shared_dir(root, personas_dir) + source_mirrors = discover_sources(root) + + if source_mirrors: + print(f"Detected source mirrors: {', '.join(source_mirrors)}") + else: + print("Detected source mirrors: none") # Handle search-only mode if args.search: + if not shared_dir: + print("No shared skill library found.") + return search_skills(shared_dir, args.search) return @@ -1004,18 +1406,26 @@ def main(): # Pre-build escalation graph and skills index escalation_graph = build_escalation_graph(personas_dir, flat_config) - skills_index = build_skills_index(shared_dir, config) if shared_dir.exists() else {} + skills_index = build_skills_index(shared_dir, config) if shared_dir else {} total_variants = 0 for pdir in persona_dirs: - total_variants += 
build_persona(pdir, output_dir, flat_config, config, escalation_graph, skills_index) + total_variants += build_persona( + pdir, output_dir, flat_config, config, escalation_graph, skills_index + ) - total_words = build_catalog(personas_dir, output_dir, config, flat_config) + total_words = build_catalog( + personas_dir, output_dir, config, flat_config, shared_dir + ) # Platform installation if args.install: print(f"\n--- Installing to: {args.install} ---\n") - targets = ["claude", "antigravity", "gemini", "openclaw", "paperclip"] if args.install == "all" else [args.install] + targets = ( + ["claude", "antigravity", "gemini", "openclaw", "paperclip"] + if args.install == "all" + else [args.install] + ) for target in targets: if target == "claude": install_claude(output_dir) @@ -1026,7 +1436,7 @@ def main(): elif target == "openclaw": install_openclaw(output_dir) elif target == "paperclip": - install_paperclip(output_dir, personas_dir) + install_paperclip(output_dir, personas_dir, shared_dir) print_summary(config, len(persona_dirs), total_variants, total_words) diff --git a/personas/_shared/anthropic-cybersecurity-skills/.claude-plugin/marketplace.json b/personas/_shared/anthropic-cybersecurity-skills/.claude-plugin/marketplace.json new file mode 100644 index 0000000..58cd50e --- /dev/null +++ b/personas/_shared/anthropic-cybersecurity-skills/.claude-plugin/marketplace.json @@ -0,0 +1,37 @@ +{ + "name": "anthropic-cybersecurity-skills", + "owner": { + "name": "mukul975", + "email": "mukuljangra5@gmail.com" + }, + "metadata": { + "description": "754 cybersecurity skills for AI agents mapped to 5 frameworks: MITRE ATT&CK, NIST CSF 2.0, MITRE ATLAS, D3FEND, and NIST AI RMF.", + "version": "1.2.0" + }, + "plugins": [ + { + "name": "cybersecurity-skills", + "source": "./", + "description": "754 cybersecurity skills covering web security, pentesting, DFIR, threat intelligence, cloud security, malware analysis, and more. 
Mapped to 5 frameworks.", + "version": "1.2.0", + "author": { + "name": "mukul975" + }, + "license": "Apache-2.0", + "keywords": [ + "cybersecurity", + "pentesting", + "forensics", + "threat-intelligence", + "cloud-security", + "malware-analysis", + "incident-response", + "zero-trust", + "devsecops" + ], + "category": "security", + "homepage": "https://github.com/mukul975/Anthropic-Cybersecurity-Skills", + "repository": "https://github.com/mukul975/Anthropic-Cybersecurity-Skills" + } + ] +} \ No newline at end of file diff --git a/personas/_shared/anthropic-cybersecurity-skills/.claude-plugin/plugin.json b/personas/_shared/anthropic-cybersecurity-skills/.claude-plugin/plugin.json new file mode 100644 index 0000000..ff4afc1 --- /dev/null +++ b/personas/_shared/anthropic-cybersecurity-skills/.claude-plugin/plugin.json @@ -0,0 +1,5 @@ +{ + "name": "cybersecurity-skills", + "description": "753 cybersecurity skills covering web security, pentesting, DFIR, threat intelligence, cloud security, malware analysis, and more.", + "version": "1.0.0" +} diff --git a/personas/_shared/anthropic-cybersecurity-skills/.github/FUNDING.yml b/personas/_shared/anthropic-cybersecurity-skills/.github/FUNDING.yml new file mode 100644 index 0000000..1c344b7 --- /dev/null +++ b/personas/_shared/anthropic-cybersecurity-skills/.github/FUNDING.yml @@ -0,0 +1,2 @@ +github: mukul975 +custom: ["https://paypal.me/mahipaljangra"] diff --git a/personas/_shared/anthropic-cybersecurity-skills/.github/ISSUE_TEMPLATE/bug-report.yml b/personas/_shared/anthropic-cybersecurity-skills/.github/ISSUE_TEMPLATE/bug-report.yml new file mode 100644 index 0000000..ae36bd5 --- /dev/null +++ b/personas/_shared/anthropic-cybersecurity-skills/.github/ISSUE_TEMPLATE/bug-report.yml @@ -0,0 +1,50 @@ +name: Bug Report +description: Report a SKILL.md validation error, broken script, or incorrect content +title: "[Bug]: " +labels: ["bug", "needs-triage"] +body: + - type: markdown + attributes: + value: | + Thanks for taking 
the time to report a bug! + - type: input + id: skill-name + attributes: + label: Skill Name + description: Which skill has the issue? + placeholder: e.g., analyzing-disk-image-with-autopsy + validations: + required: true + - type: dropdown + id: bug-type + attributes: + label: Bug Type + options: + - SKILL.md validation error + - Broken/incorrect script + - Wrong instructions or commands + - Missing required files + - Incorrect metadata/frontmatter + - Other + validations: + required: true + - type: textarea + id: description + attributes: + label: Description + description: What is the issue? + validations: + required: true + - type: textarea + id: expected + attributes: + label: Expected Behavior + description: What should happen instead? + validations: + required: true + - type: input + id: ai-agent + attributes: + label: AI Agent Used + description: Which AI agent were you using? + placeholder: e.g., Claude Code, GitHub Copilot, Codex CLI diff --git a/personas/_shared/anthropic-cybersecurity-skills/.github/ISSUE_TEMPLATE/config.yml b/personas/_shared/anthropic-cybersecurity-skills/.github/ISSUE_TEMPLATE/config.yml new file mode 100644 index 0000000..d2b10ec --- /dev/null +++ b/personas/_shared/anthropic-cybersecurity-skills/.github/ISSUE_TEMPLATE/config.yml @@ -0,0 +1,8 @@ +blank_issues_enabled: false +contact_links: + - name: Security Vulnerability + url: https://github.com/mukul975/Anthropic-Cybersecurity-Skills/security/advisories/new + about: Report a security vulnerability in this repository + - name: Discussion + url: https://github.com/mukul975/Anthropic-Cybersecurity-Skills/discussions + about: Ask questions or discuss ideas diff --git a/personas/_shared/anthropic-cybersecurity-skills/.github/ISSUE_TEMPLATE/improve-skill.md b/personas/_shared/anthropic-cybersecurity-skills/.github/ISSUE_TEMPLATE/improve-skill.md new file mode 100644 index 0000000..14de01f --- /dev/null +++ 
b/personas/_shared/anthropic-cybersecurity-skills/.github/ISSUE_TEMPLATE/improve-skill.md @@ -0,0 +1,19 @@ +--- +name: Improve existing skill +about: Suggest improvements to an existing skill +title: '[IMPROVE] skill-name-here' +labels: 'enhancement' +assignees: '' +--- + +## Skill to improve + + +## What needs improvement? +- [ ] agent.py has errors or placeholders +- [ ] api-reference.md is incomplete +- [ ] SKILL.md frontmatter is missing fields +- [ ] ATT&CK mapping is incorrect +- [ ] Other: + +## Suggested improvement diff --git a/personas/_shared/anthropic-cybersecurity-skills/.github/ISSUE_TEMPLATE/new-skill-request.yml b/personas/_shared/anthropic-cybersecurity-skills/.github/ISSUE_TEMPLATE/new-skill-request.yml new file mode 100644 index 0000000..ac19d3f --- /dev/null +++ b/personas/_shared/anthropic-cybersecurity-skills/.github/ISSUE_TEMPLATE/new-skill-request.yml @@ -0,0 +1,58 @@ +name: New Skill Request +description: Request a new cybersecurity skill to be added to the database +title: "[Skill Request]: " +labels: ["enhancement", "new-skill", "help wanted"] +body: + - type: markdown + attributes: + value: | + Request a new cybersecurity skill. The more detail you provide, the faster we can add it! 
+ - type: input + id: skill-name + attributes: + label: Proposed Skill Name + description: Kebab-case gerund form (e.g., analyzing-memory-dump-with-volatility) + placeholder: performing-task-name + validations: + required: true + - type: dropdown + id: category + attributes: + label: Category + options: + - Threat Detection + - Incident Response + - Penetration Testing + - Digital Forensics + - Compliance & Governance + - Network Security + - Cloud Security + - Application Security + - Malware Analysis + - OSINT + - Zero Trust Architecture + - OT/ICS Security + - DevSecOps + - Ransomware Defense + - Threat Intelligence + - Other + validations: + required: true + - type: textarea + id: description + attributes: + label: Skill Description + description: What should this skill teach an AI agent to do? + validations: + required: true + - type: input + id: mitre-attack + attributes: + label: MITRE ATT&CK Technique(s) + description: Optional - relevant technique IDs + placeholder: e.g., T1059, T1078 + - type: textarea + id: tools + attributes: + label: Key Tools + description: What tools/commands should this skill cover? diff --git a/personas/_shared/anthropic-cybersecurity-skills/.github/ISSUE_TEMPLATE/new-skill.md b/personas/_shared/anthropic-cybersecurity-skills/.github/ISSUE_TEMPLATE/new-skill.md new file mode 100644 index 0000000..33d3130 --- /dev/null +++ b/personas/_shared/anthropic-cybersecurity-skills/.github/ISSUE_TEMPLATE/new-skill.md @@ -0,0 +1,25 @@ +--- +name: Add new skill +about: Propose a new cybersecurity skill for the database +title: '[NEW SKILL] skill-name-here' +labels: 'new-skill, good first issue' +assignees: '' +--- + +## Skill name (kebab-case) + + +## Domain / Subdomain + + +## Description + + +## MITRE ATT&CK techniques + + +## NIST CSF function + + +## Why is this skill needed? 
+ diff --git a/personas/_shared/anthropic-cybersecurity-skills/.github/ISSUE_TEMPLATE/skill-improvement.yml b/personas/_shared/anthropic-cybersecurity-skills/.github/ISSUE_TEMPLATE/skill-improvement.yml new file mode 100644 index 0000000..96afb5f --- /dev/null +++ b/personas/_shared/anthropic-cybersecurity-skills/.github/ISSUE_TEMPLATE/skill-improvement.yml @@ -0,0 +1,41 @@ +name: Skill Improvement +description: Suggest improvements to an existing skill +title: "[Improvement]: " +labels: ["enhancement", "skill-improvement"] +body: + - type: input + id: skill-name + attributes: + label: Skill Name + placeholder: e.g., analyzing-network-traffic-with-wireshark + validations: + required: true + - type: dropdown + id: improvement-type + attributes: + label: Type of Improvement + options: + - More accurate/updated instructions + - Better workflow steps + - Add missing tools or commands + - Improve description for agent discovery + - Add MITRE ATT&CK mapping + - Add NIST CSF alignment + - Improve scripts/assets + - Fix outdated content + validations: + required: true + - type: textarea + id: current-issue + attributes: + label: Current Issue + description: What is wrong or missing? + validations: + required: true + - type: textarea + id: suggested-improvement + attributes: + label: Suggested Improvement + description: What should be changed or added? 
+ validations: + required: true diff --git a/personas/_shared/anthropic-cybersecurity-skills/.github/workflows/sync-marketplace-version.yml b/personas/_shared/anthropic-cybersecurity-skills/.github/workflows/sync-marketplace-version.yml new file mode 100644 index 0000000..281a9ff --- /dev/null +++ b/personas/_shared/anthropic-cybersecurity-skills/.github/workflows/sync-marketplace-version.yml @@ -0,0 +1,39 @@ +name: Sync Marketplace Version on Release + +on: + release: + types: [published] + +jobs: + sync-version: + runs-on: ubuntu-latest + permissions: + contents: write + + steps: + - uses: actions/checkout@v4 + with: + token: ${{ secrets.GITHUB_TOKEN }} + + - name: Extract version from tag + id: version + run: | + VERSION=${GITHUB_REF_NAME#v} + echo "version=$VERSION" >> $GITHUB_OUTPUT + echo "tag=$GITHUB_REF_NAME" >> $GITHUB_OUTPUT + + - name: Update marketplace.json version + env: + VERSION: ${{ steps.version.outputs.version }} + run: | + jq --arg v "$VERSION" '.metadata.version = $v | .plugins[].version = $v' .claude-plugin/marketplace.json > tmp.json + mv tmp.json .claude-plugin/marketplace.json + echo "Updated marketplace.json to version $VERSION" + + - name: Commit and push + run: | + git config user.name "mukul975" + git config user.email "mukuljangra5@gmail.com" + git add .claude-plugin/marketplace.json + git diff --staged --quiet || git commit -m "chore: bump marketplace version to ${{ steps.version.outputs.tag }}" + git push diff --git a/personas/_shared/anthropic-cybersecurity-skills/.github/workflows/update-index.yml b/personas/_shared/anthropic-cybersecurity-skills/.github/workflows/update-index.yml new file mode 100644 index 0000000..2910b02 --- /dev/null +++ b/personas/_shared/anthropic-cybersecurity-skills/.github/workflows/update-index.yml @@ -0,0 +1,70 @@ +name: Update marketplace index + +on: + push: + branches: [main] + paths: + - 'skills/**' + - '.github/workflows/update-index.yml' + workflow_dispatch: + +jobs: + update-index: + runs-on: 
ubuntu-latest + permissions: + contents: write + steps: + - uses: actions/checkout@v4 + with: + token: ${{ secrets.GITHUB_TOKEN }} + + - name: Regenerate index.json + run: | + python3 << 'EOF' + import os, json, re + from datetime import datetime, timezone + + skills_dir = "skills" + skills = [] + + for skill_name in sorted(os.listdir(skills_dir)): + skill_md = os.path.join(skills_dir, skill_name, "SKILL.md") + if not os.path.isfile(skill_md): + continue + with open(skill_md, "r", encoding="utf-8") as f: + content = f.read() + fm_match = re.match(r"^---\n(.*?)\n---", content, re.DOTALL) + description = "" + if fm_match: + m = re.search(r"^description:\s*(.+)$", fm_match.group(1), re.MULTILINE) + if m: + description = m.group(1).strip().strip('"') + skills.append({ + "name": skill_name, + "description": description, + "domain": "cybersecurity", + "path": f"skills/{skill_name}" + }) + + index = { + "version": "1.1.0", + "generated_at": datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ"), + "repository": "https://github.com/mukul975/Anthropic-Cybersecurity-Skills", + "domain": "cybersecurity", + "total_skills": len(skills), + "skills": skills + } + + with open("index.json", "w", encoding="utf-8") as f: + json.dump(index, f, separators=(',', ':')) + + print(f"Updated index.json: {len(skills)} skills") + EOF + + - name: Commit updated index + run: | + git config user.name "mukul975" + git config user.email "mukuljangra5@gmail.com" + git add index.json + git diff --staged --quiet || git commit -m "chore: auto-update index.json" + git push diff --git a/personas/_shared/anthropic-cybersecurity-skills/.github/workflows/validate-skills.yml b/personas/_shared/anthropic-cybersecurity-skills/.github/workflows/validate-skills.yml new file mode 100644 index 0000000..5497aef --- /dev/null +++ b/personas/_shared/anthropic-cybersecurity-skills/.github/workflows/validate-skills.yml @@ -0,0 +1,128 @@ +name: Validate SKILL.md files + +on: + push: + paths: + - 'skills/**' + 
pull_request: + paths: + - 'skills/**' + +jobs: + validate: + runs-on: ubuntu-latest + name: Validate SKILL.md frontmatter + steps: + - uses: actions/checkout@v4 + + - name: Validate SKILL.md frontmatter with Python + run: | + python3 << 'EOF' + import os + import re + import sys + + REQUIRED_FIELDS = ['name', 'description', 'domain', 'subdomain', 'tags', 'version', 'author', 'license'] + errors = [] + checked = 0 + + for root, dirs, files in os.walk('skills'): + for file in files: + if file == 'SKILL.md': + path = os.path.join(root, file) + checked += 1 + with open(path, 'r', encoding='utf-8') as f: + content = f.read() + + # Check frontmatter exists + fm_match = re.match(r'^---\n(.*?)\n---', content, re.DOTALL) + if not fm_match: + errors.append(f"{path}: Missing YAML frontmatter") + continue + + fm = fm_match.group(1) + + # Check required fields + for field in REQUIRED_FIELDS: + if not re.search(rf'^{field}:', fm, re.MULTILINE): + errors.append(f"{path}: Missing required field '{field}'") + + # Check name format (kebab-case) + name_match = re.search(r'^name:\s*(.+)$', fm, re.MULTILINE) + if name_match: + name = name_match.group(1).strip().strip('"') + if not re.match(r'^[a-z0-9-]+$', name): + errors.append(f"{path}: Name '{name}' must be kebab-case") + if len(name) > 64: + errors.append(f"{path}: Name '{name}' exceeds 64 characters") + + print(f"Checked {checked} SKILL.md files") + + if errors: + print(f"\n{len(errors)} validation error(s):") + for e in errors: + print(f" ❌ {e}") + sys.exit(1) + else: + print(f"✅ All {checked} skills valid") + EOF + + - name: Check for duplicate skill names + run: | + python3 << 'EOF' + import os + import re + from collections import Counter + + names = [] + for root, dirs, files in os.walk('skills'): + for file in files: + if file == 'SKILL.md': + path = os.path.join(root, file) + with open(path, 'r', encoding='utf-8') as f: + content = f.read() + fm_match = re.match(r'^---\n(.*?)\n---', content, re.DOTALL) + if fm_match: + 
name_match = re.search(r'^name:\s*(.+)$', fm_match.group(1), re.MULTILINE) + if name_match: + names.append(name_match.group(1).strip().strip('"')) + + duplicates = [name for name, count in Counter(names).items() if count > 1] + if duplicates: + print(f"❌ Duplicate skill names found: {duplicates}") + exit(1) + print(f"✅ No duplicate names in {len(names)} skills") + EOF + + - name: Report skill counts + if: always() + run: | + echo "## Skill Database Stats" >> $GITHUB_STEP_SUMMARY + echo "" >> $GITHUB_STEP_SUMMARY + python3 << 'EOF' + import os + import re + from collections import Counter + + subdomain_counts = Counter() + total = 0 + for root, dirs, files in os.walk('skills'): + for file in files: + if file == 'SKILL.md': + total += 1 + path = os.path.join(root, file) + with open(path, 'r', encoding='utf-8') as f: + content = f.read() + fm_match = re.match(r'^---\n(.*?)\n---', content, re.DOTALL) + if fm_match: + sd_match = re.search(r'^subdomain:\s*(.+)$', fm_match.group(1), re.MULTILINE) + if sd_match: + subdomain_counts[sd_match.group(1).strip()] += 1 + + print(f"**Total Skills: {total}**") + print("") + print("| Subdomain | Count |") + print("|-----------|-------|") + for sd, count in sorted(subdomain_counts.items(), key=lambda x: -x[1]): + print(f"| {sd} | {count} |") + EOF diff --git a/personas/_shared/anthropic-cybersecurity-skills/ATTACK_COVERAGE.md b/personas/_shared/anthropic-cybersecurity-skills/ATTACK_COVERAGE.md new file mode 100644 index 0000000..4fcc2bd --- /dev/null +++ b/personas/_shared/anthropic-cybersecurity-skills/ATTACK_COVERAGE.md @@ -0,0 +1,509 @@ +# MITRE ATT&CK Coverage Map + +

+ MITRE ATT&CK + Techniques + Tactics +

+ +This document maps all **291 unique MITRE ATT&CK techniques** (across **149 parent techniques**) referenced in our **753+ cybersecurity skills** to the 14 Enterprise ATT&CK tactics. Use this to identify coverage gaps, plan detection engineering priorities, or validate your security program against the ATT&CK framework. + +> **How to read this:** Each technique links to its official ATT&CK page. Skills listed under each technique are the ones in this repository that teach detection, hunting, exploitation, or response for that technique. + +--- + +## Coverage Summary + +| Tactic | Techniques | Coverage | +|:-------|:---------:|:---------| +| 🔎 **Reconnaissance** | **12** | `████████████░░░░░░░░░░░░░░░░░░` | +| 🏗️ **Resource Development** | **7** | `███████░░░░░░░░░░░░░░░░░░░░░░░` | +| 🚪 **Initial Access** | **18** | `██████████████████░░░░░░░░░░░░` | +| ⚡ **Execution** | **18** | `██████████████████░░░░░░░░░░░░` | +| 🔩 **Persistence** | **36** | `██████████████████████████████` | +| ⬆️ **Privilege Escalation** | **11** | `███████████░░░░░░░░░░░░░░░░░░░` | +| 🥷 **Defense Evasion** | **48** | `██████████████████████████████` | +| 🔑 **Credential Access** | **27** | `███████████████████████████░░░` | +| 🗺️ **Discovery** | **20** | `████████████████████░░░░░░░░░░` | +| ↔️ **Lateral Movement** | **9** | `█████████░░░░░░░░░░░░░░░░░░░░░` | +| 📦 **Collection** | **13** | `█████████████░░░░░░░░░░░░░░░░░` | +| 📡 **Command and Control** | **20** | `████████████████████░░░░░░░░░░` | +| 📤 **Exfiltration** | **12** | `████████████░░░░░░░░░░░░░░░░░░` | +| 💥 **Impact** | **6** | `██████░░░░░░░░░░░░░░░░░░░░░░░░` | +| 🔧 **Other/Cross-tactic** | **34** | | +| | **291** | **Total unique techniques** | + +--- + +## 🔎 Reconnaissance + +**12 techniques covered** + +| Technique | Skills | +|:----------|:-------| +| [T1589](https://attack.mitre.org/techniques/T1589/) | `conducting-full-scope-red-team-engagement`, `conducting-social-engineering-pretext-call`, 
`performing-open-source-intelligence-gathering` | +| [T1590](https://attack.mitre.org/techniques/T1590/) | `performing-open-source-intelligence-gathering` | +| [T1591](https://attack.mitre.org/techniques/T1591/) | `collecting-open-source-intelligence`, `conducting-social-engineering-pretext-call`, `performing-open-source-intelligence-gathering` | +| [T1592](https://attack.mitre.org/techniques/T1592/) | `performing-open-source-intelligence-gathering` | +| [T1593](https://attack.mitre.org/techniques/T1593/) | `conducting-full-scope-red-team-engagement`, `performing-open-source-intelligence-gathering` | +| [T1594](https://attack.mitre.org/techniques/T1594/) | `performing-open-source-intelligence-gathering` | +| [T1595](https://attack.mitre.org/techniques/T1595/) | `executing-red-team-engagement-planning`, `triaging-security-incident` | +| [T1595.001](https://attack.mitre.org/techniques/T1595/001/) | `performing-open-source-intelligence-gathering` | +| [T1595.002](https://attack.mitre.org/techniques/T1595/002/) | `performing-open-source-intelligence-gathering` | +| [T1596](https://attack.mitre.org/techniques/T1596/) | `performing-open-source-intelligence-gathering` | +| [T1598](https://attack.mitre.org/techniques/T1598/) | `conducting-social-engineering-pretext-call` | +| [T1598.003](https://attack.mitre.org/techniques/T1598/003/) | `conducting-social-engineering-pretext-call`, `conducting-spearphishing-simulation-campaign` | + +--- + +## 🏗️ Resource Development + +**7 techniques covered** + +| Technique | Skills | +|:----------|:-------| +| [T1583.001](https://attack.mitre.org/techniques/T1583/001/) | `building-red-team-c2-infrastructure-with-havoc`, `conducting-full-scope-red-team-engagement`, `conducting-spearphishing-simulation-campaign`, `implementing-mitre-attack-coverage-mapping` | +| [T1583.003](https://attack.mitre.org/techniques/T1583/003/) | `building-red-team-c2-infrastructure-with-havoc` | +| [T1584.001](https://attack.mitre.org/techniques/T1584/001/) | 
`hunting-for-dns-based-persistence` | +| [T1585.002](https://attack.mitre.org/techniques/T1585/002/) | `conducting-spearphishing-simulation-campaign` | +| [T1587.001](https://attack.mitre.org/techniques/T1587/001/) | `building-red-team-c2-infrastructure-with-havoc`, `conducting-full-scope-red-team-engagement` | +| [T1608.001](https://attack.mitre.org/techniques/T1608/001/) | `conducting-spearphishing-simulation-campaign` | +| [T1608.005](https://attack.mitre.org/techniques/T1608/005/) | `conducting-spearphishing-simulation-campaign` | + +--- + +## 🚪 Initial Access + +**18 techniques covered** + +| Technique | Skills | +|:----------|:-------| +| [T1078](https://attack.mitre.org/techniques/T1078/) | `analyzing-apt-group-with-mitre-navigator`, `analyzing-powershell-script-block-logging`, `analyzing-windows-event-logs-in-splunk`, `building-threat-hunt-hypothesis-framework`, `conducting-full-scope-red-team-engagement` +13 more | +| [T1078.001](https://attack.mitre.org/techniques/T1078/001/) | `detecting-service-account-abuse` | +| [T1078.002](https://attack.mitre.org/techniques/T1078/002/) | `conducting-domain-persistence-with-dcsync`, `detecting-service-account-abuse`, `exploiting-active-directory-certificate-services-esc1`, `exploiting-constrained-delegation-abuse`, `exploiting-nopac-cve-2021-42278-42287` +1 more | +| [T1078.003](https://attack.mitre.org/techniques/T1078/003/) | `performing-privilege-escalation-assessment` | +| [T1078.004](https://attack.mitre.org/techniques/T1078/004/) | `detecting-azure-lateral-movement`, `detecting-azure-service-principal-abuse`, `implementing-mitre-attack-coverage-mapping`, `implementing-threat-modeling-with-mitre-attack` | +| [T1091](https://attack.mitre.org/techniques/T1091/) | `executing-red-team-engagement-planning`, `performing-physical-intrusion-assessment` | +| [T1133](https://attack.mitre.org/techniques/T1133/) | `executing-red-team-engagement-planning`, `performing-threat-landscape-assessment-for-sector` | +| 
[T1190](https://attack.mitre.org/techniques/T1190/) | `conducting-full-scope-red-team-engagement`, `executing-red-team-engagement-planning`, `exploiting-ms17-010-eternalblue-vulnerability`, `hunting-for-webshell-activity`, `performing-threat-landscape-assessment-for-sector` +1 more | +| [T1195](https://attack.mitre.org/techniques/T1195/) | `analyzing-supply-chain-malware-artifacts`, `performing-threat-landscape-assessment-for-sector` | +| [T1195.001](https://attack.mitre.org/techniques/T1195/001/) | `hunting-for-supply-chain-compromise` | +| [T1195.002](https://attack.mitre.org/techniques/T1195/002/) | `hunting-for-supply-chain-compromise` | +| [T1199](https://attack.mitre.org/techniques/T1199/) | `hunting-for-supply-chain-compromise`, `performing-physical-intrusion-assessment` | +| [T1200](https://attack.mitre.org/techniques/T1200/) | `executing-red-team-engagement-planning`, `performing-physical-intrusion-assessment` | +| [T1566](https://attack.mitre.org/techniques/T1566/) | `analyzing-apt-group-with-mitre-navigator`, `analyzing-threat-actor-ttps-with-mitre-attack`, `analyzing-threat-landscape-with-misp`, `building-attack-pattern-library-from-cti-reports`, `hunting-advanced-persistent-threats` +3 more | +| [T1566.001](https://attack.mitre.org/techniques/T1566/001/) | `analyzing-apt-group-with-mitre-navigator`, `analyzing-campaign-attribution-evidence`, `analyzing-macro-malware-in-office-documents`, `analyzing-threat-actor-ttps-with-mitre-navigator`, `building-attack-pattern-library-from-cti-reports` +13 more | +| [T1566.002](https://attack.mitre.org/techniques/T1566/002/) | `building-attack-pattern-library-from-cti-reports`, `conducting-spearphishing-simulation-campaign`, `hunting-for-spearphishing-indicators`, `implementing-continuous-security-validation-with-bas`, `implementing-mitre-attack-coverage-mapping` +1 more | +| [T1566.003](https://attack.mitre.org/techniques/T1566/003/) | `conducting-spearphishing-simulation-campaign`, 
`hunting-for-spearphishing-indicators`, `implementing-continuous-security-validation-with-bas` | +| [T1566.004](https://attack.mitre.org/techniques/T1566/004/) | `conducting-social-engineering-pretext-call` | + +--- + +## ⚡ Execution + +**18 techniques covered** + +| Technique | Skills | +|:----------|:-------| +| [T1047](https://attack.mitre.org/techniques/T1047/) | `conducting-full-scope-red-team-engagement`, `detecting-fileless-attacks-on-endpoints`, `detecting-lateral-movement-with-splunk`, `detecting-living-off-the-land-attacks`, `detecting-living-off-the-land-with-lolbas` +8 more | +| [T1053](https://attack.mitre.org/techniques/T1053/) | `analyzing-apt-group-with-mitre-navigator`, `analyzing-persistence-mechanisms-in-linux`, `hunting-advanced-persistent-threats`, `hunting-for-persistence-mechanisms-in-windows`, `implementing-mitre-attack-coverage-mapping` +4 more | +| [T1053.002](https://attack.mitre.org/techniques/T1053/002/) | `hunting-for-scheduled-task-persistence` | +| [T1053.003](https://attack.mitre.org/techniques/T1053/003/) | `analyzing-persistence-mechanisms-in-linux`, `hunting-for-scheduled-task-persistence`, `performing-privilege-escalation-assessment`, `performing-privilege-escalation-on-linux` | +| [T1053.005](https://attack.mitre.org/techniques/T1053/005/) | `analyzing-apt-group-with-mitre-navigator`, `analyzing-campaign-attribution-evidence`, `analyzing-windows-event-logs-in-splunk`, `building-attack-pattern-library-from-cti-reports`, `building-detection-rule-with-splunk-spl` +17 more | +| [T1059](https://attack.mitre.org/techniques/T1059/) | `analyzing-apt-group-with-mitre-navigator`, `analyzing-threat-actor-ttps-with-mitre-attack`, `analyzing-windows-event-logs-in-splunk`, `building-incident-timeline-with-timesketch`, `deobfuscating-powershell-obfuscated-malware` +7 more | +| [T1059.001](https://attack.mitre.org/techniques/T1059/001/) | `analyzing-apt-group-with-mitre-navigator`, `analyzing-campaign-attribution-evidence`, 
`analyzing-macro-malware-in-office-documents`, `analyzing-powershell-empire-artifacts`, `analyzing-powershell-script-block-logging` +29 more | +| [T1059.003](https://attack.mitre.org/techniques/T1059/003/) | `building-attack-pattern-library-from-cti-reports`, `building-detection-rule-with-splunk-spl`, `detecting-suspicious-powershell-execution`, `mapping-mitre-attack-techniques`, `performing-purple-team-atomic-testing` | +| [T1059.004](https://attack.mitre.org/techniques/T1059/004/) | `performing-purple-team-atomic-testing` | +| [T1059.005](https://attack.mitre.org/techniques/T1059/005/) | `analyzing-macro-malware-in-office-documents`, `detecting-living-off-the-land-attacks`, `executing-red-team-exercise`, `hunting-for-living-off-the-land-binaries`, `hunting-for-lolbins-execution-in-endpoint-logs` +2 more | +| [T1059.006](https://attack.mitre.org/techniques/T1059/006/) | `performing-purple-team-atomic-testing` | +| [T1059.007](https://attack.mitre.org/techniques/T1059/007/) | `performing-purple-team-atomic-testing` | +| [T1129](https://attack.mitre.org/techniques/T1129/) | `performing-purple-team-atomic-testing` | +| [T1203](https://attack.mitre.org/techniques/T1203/) | `performing-purple-team-atomic-testing` | +| [T1204.001](https://attack.mitre.org/techniques/T1204/001/) | `conducting-spearphishing-simulation-campaign` | +| [T1204.002](https://attack.mitre.org/techniques/T1204/002/) | `analyzing-macro-malware-in-office-documents`, `conducting-full-scope-red-team-engagement`, `conducting-spearphishing-simulation-campaign`, `detecting-living-off-the-land-attacks`, `executing-red-team-engagement-planning` +4 more | +| [T1569](https://attack.mitre.org/techniques/T1569/) | `performing-purple-team-atomic-testing` | +| [T1569.002](https://attack.mitre.org/techniques/T1569/002/) | `detecting-lateral-movement-in-network`, `detecting-lateral-movement-with-splunk`, `exploiting-ms17-010-eternalblue-vulnerability`, `performing-purple-team-atomic-testing` | + +--- + +## 🔩 
Persistence + +**36 techniques covered** + +| Technique | Skills | +|:----------|:-------| +| [T1098](https://attack.mitre.org/techniques/T1098/) | `analyzing-windows-event-logs-in-splunk`, `conducting-domain-persistence-with-dcsync`, `hunting-for-t1098-account-manipulation`, `implementing-mitre-attack-coverage-mapping`, `implementing-siem-use-cases-for-detection` +1 more | +| [T1098.001](https://attack.mitre.org/techniques/T1098/001/) | `conducting-cloud-penetration-testing`, `detecting-azure-lateral-movement`, `detecting-azure-service-principal-abuse`, `hunting-for-t1098-account-manipulation`, `implementing-mitre-attack-coverage-mapping` | +| [T1098.002](https://attack.mitre.org/techniques/T1098/002/) | `detecting-azure-lateral-movement`, `detecting-email-forwarding-rules-attack` | +| [T1098.004](https://attack.mitre.org/techniques/T1098/004/) | `analyzing-persistence-mechanisms-in-linux`, `implementing-security-monitoring-with-datadog` | +| [T1136](https://attack.mitre.org/techniques/T1136/) | `detecting-privilege-escalation-in-kubernetes-pods`, `implementing-mitre-attack-coverage-mapping`, `performing-purple-team-atomic-testing` | +| [T1136.001](https://attack.mitre.org/techniques/T1136/001/) | `analyzing-windows-event-logs-in-splunk`, `performing-purple-team-atomic-testing` | +| [T1136.002](https://attack.mitre.org/techniques/T1136/002/) | `exploiting-nopac-cve-2021-42278-42287` | +| [T1197](https://attack.mitre.org/techniques/T1197/) | `detecting-living-off-the-land-attacks`, `detecting-living-off-the-land-with-lolbas`, `hunting-for-living-off-the-land-binaries`, `hunting-for-lolbins-execution-in-endpoint-logs`, `performing-purple-team-atomic-testing` | +| [T1505](https://attack.mitre.org/techniques/T1505/) | `performing-purple-team-atomic-testing` | +| [T1505.003](https://attack.mitre.org/techniques/T1505/003/) | `building-attack-pattern-library-from-cti-reports`, `hunting-for-webshell-activity`, `performing-purple-team-atomic-testing` | +| 
[T1542.001](https://attack.mitre.org/techniques/T1542/001/) | `analyzing-uefi-bootkit-persistence` | +| [T1542.003](https://attack.mitre.org/techniques/T1542/003/) | `analyzing-uefi-bootkit-persistence` | +| [T1543](https://attack.mitre.org/techniques/T1543/) | `analyzing-persistence-mechanisms-in-linux`, `hunting-for-persistence-mechanisms-in-windows`, `performing-purple-team-atomic-testing` | +| [T1543.002](https://attack.mitre.org/techniques/T1543/002/) | `analyzing-persistence-mechanisms-in-linux`, `performing-privilege-escalation-on-linux` | +| [T1543.003](https://attack.mitre.org/techniques/T1543/003/) | `detecting-lateral-movement-with-splunk`, `detecting-living-off-the-land-attacks`, `detecting-privilege-escalation-attempts`, `hunting-for-persistence-mechanisms-in-windows`, `hunting-for-unusual-service-installations` +2 more | +| [T1546](https://attack.mitre.org/techniques/T1546/) | `analyzing-persistence-mechanisms-in-linux`, `performing-purple-team-atomic-testing` | +| [T1546.001](https://attack.mitre.org/techniques/T1546/001/) | `performing-purple-team-atomic-testing` | +| [T1546.003](https://attack.mitre.org/techniques/T1546/003/) | `analyzing-windows-event-logs-in-splunk`, `detecting-fileless-attacks-on-endpoints`, `detecting-fileless-malware-techniques`, `detecting-wmi-persistence`, `hunting-for-lateral-movement-via-wmi` +3 more | +| [T1546.004](https://attack.mitre.org/techniques/T1546/004/) | `analyzing-persistence-mechanisms-in-linux` | +| [T1546.010](https://attack.mitre.org/techniques/T1546/010/) | `hunting-for-persistence-mechanisms-in-windows` | +| [T1546.012](https://attack.mitre.org/techniques/T1546/012/) | `hunting-for-persistence-mechanisms-in-windows`, `hunting-for-registry-persistence-mechanisms` | +| [T1546.015](https://attack.mitre.org/techniques/T1546/015/) | `hunting-for-persistence-mechanisms-in-windows`, `hunting-for-registry-persistence-mechanisms` | +| [T1547](https://attack.mitre.org/techniques/T1547/) | 
`analyzing-apt-group-with-mitre-navigator`, `analyzing-malware-persistence-with-autoruns`, `hunting-advanced-persistent-threats`, `hunting-for-persistence-mechanisms-in-windows`, `implementing-siem-use-cases-for-detection` +3 more | +| [T1547.001](https://attack.mitre.org/techniques/T1547/001/) | `analyzing-apt-group-with-mitre-navigator`, `analyzing-windows-event-logs-in-splunk`, `building-attack-pattern-library-from-cti-reports`, `conducting-full-scope-red-team-engagement`, `detecting-fileless-attacks-on-endpoints` +10 more | +| [T1547.004](https://attack.mitre.org/techniques/T1547/004/) | `hunting-for-persistence-mechanisms-in-windows`, `hunting-for-registry-persistence-mechanisms`, `performing-purple-team-atomic-testing` | +| [T1547.005](https://attack.mitre.org/techniques/T1547/005/) | `hunting-for-persistence-mechanisms-in-windows` | +| [T1547.009](https://attack.mitre.org/techniques/T1547/009/) | `performing-purple-team-atomic-testing` | +| [T1556](https://attack.mitre.org/techniques/T1556/) | `performing-initial-access-with-evilginx3` | +| [T1556.007](https://attack.mitre.org/techniques/T1556/007/) | `detecting-azure-lateral-movement` | +| [T1574](https://attack.mitre.org/techniques/T1574/) | `analyzing-persistence-mechanisms-in-linux`, `performing-purple-team-atomic-testing` | +| [T1574.001](https://attack.mitre.org/techniques/T1574/001/) | `detecting-dll-sideloading-attacks`, `hunting-for-persistence-mechanisms-in-windows`, `performing-purple-team-atomic-testing` | +| [T1574.002](https://attack.mitre.org/techniques/T1574/002/) | `analyzing-windows-event-logs-in-splunk`, `building-attack-pattern-library-from-cti-reports`, `detecting-dll-sideloading-attacks`, `implementing-siem-use-cases-for-detection`, `performing-purple-team-atomic-testing` | +| [T1574.006](https://attack.mitre.org/techniques/T1574/006/) | `analyzing-persistence-mechanisms-in-linux`, `detecting-dll-sideloading-attacks`, `performing-privilege-escalation-on-linux` | +| 
[T1574.008](https://attack.mitre.org/techniques/T1574/008/) | `detecting-dll-sideloading-attacks` | +| [T1574.009](https://attack.mitre.org/techniques/T1574/009/) | `detecting-privilege-escalation-attempts` | +| [T1574.011](https://attack.mitre.org/techniques/T1574/011/) | `detecting-privilege-escalation-attempts` | + +--- + +## ⬆️ Privilege Escalation + +**11 techniques covered** + +| Technique | Skills | +|:----------|:-------| +| [T1068](https://attack.mitre.org/techniques/T1068/) | `conducting-full-scope-red-team-engagement`, `detecting-container-escape-attempts`, `detecting-privilege-escalation-attempts`, `detecting-privilege-escalation-in-kubernetes-pods`, `executing-red-team-engagement-planning` +5 more | +| [T1134](https://attack.mitre.org/techniques/T1134/) | `analyzing-windows-event-logs-in-splunk`, `detecting-privilege-escalation-attempts` | +| [T1134.001](https://attack.mitre.org/techniques/T1134/001/) | `detecting-privilege-escalation-attempts`, `exploiting-constrained-delegation-abuse`, `performing-purple-team-atomic-testing` | +| [T1134.005](https://attack.mitre.org/techniques/T1134/005/) | `hunting-for-t1098-account-manipulation`, `performing-active-directory-compromise-investigation` | +| [T1484](https://attack.mitre.org/techniques/T1484/) | `exploiting-active-directory-certificate-services-esc1`, `performing-active-directory-vulnerability-assessment` | +| [T1484.001](https://attack.mitre.org/techniques/T1484/001/) | `deploying-active-directory-honeytokens`, `performing-active-directory-compromise-investigation` | +| [T1548](https://attack.mitre.org/techniques/T1548/) | `detecting-container-escape-attempts`, `detecting-privilege-escalation-in-kubernetes-pods`, `detecting-t1548-abuse-elevation-control-mechanism`, `performing-privilege-escalation-assessment` | +| [T1548.001](https://attack.mitre.org/techniques/T1548/001/) | `detecting-privilege-escalation-attempts`, `detecting-privilege-escalation-in-kubernetes-pods`, 
`detecting-t1548-abuse-elevation-control-mechanism`, `performing-privilege-escalation-assessment`, `performing-privilege-escalation-on-linux` | +| [T1548.002](https://attack.mitre.org/techniques/T1548/002/) | `conducting-full-scope-red-team-engagement`, `detecting-privilege-escalation-attempts`, `detecting-t1548-abuse-elevation-control-mechanism`, `performing-purple-team-atomic-testing` | +| [T1548.003](https://attack.mitre.org/techniques/T1548/003/) | `detecting-privilege-escalation-attempts`, `detecting-t1548-abuse-elevation-control-mechanism`, `performing-privilege-escalation-assessment`, `performing-privilege-escalation-on-linux` | +| [T1548.004](https://attack.mitre.org/techniques/T1548/004/) | `detecting-t1548-abuse-elevation-control-mechanism` | + +--- + +## 🥷 Defense Evasion + +**48 techniques covered** + +| Technique | Skills | +|:----------|:-------| +| [T1027](https://attack.mitre.org/techniques/T1027/) | `analyzing-apt-group-with-mitre-navigator`, `analyzing-powershell-empire-artifacts`, `analyzing-powershell-script-block-logging`, `building-attack-pattern-library-from-cti-reports`, `conducting-full-scope-red-team-engagement` +3 more | +| [T1036](https://attack.mitre.org/techniques/T1036/) | `detecting-evasion-techniques-in-endpoint-logs`, `implementing-mitre-attack-coverage-mapping`, `implementing-siem-use-cases-for-detection`, `performing-purple-team-atomic-testing` | +| [T1036.005](https://attack.mitre.org/techniques/T1036/005/) | `detecting-process-injection-techniques`, `performing-purple-team-atomic-testing` | +| [T1055](https://attack.mitre.org/techniques/T1055/) | `building-attack-pattern-library-from-cti-reports`, `building-red-team-c2-infrastructure-with-havoc`, `conducting-full-scope-red-team-engagement`, `detecting-evasion-techniques-in-endpoint-logs`, `detecting-fileless-attacks-on-endpoints` +13 more | +| [T1055.001](https://attack.mitre.org/techniques/T1055/001/) | `detecting-process-hollowing-technique`, 
`detecting-process-injection-techniques`, `detecting-t1055-process-injection-with-sysmon`, `hunting-for-process-injection-techniques`, `performing-purple-team-atomic-testing` +1 more | +| [T1055.002](https://attack.mitre.org/techniques/T1055/002/) | `detecting-process-injection-techniques`, `detecting-t1055-process-injection-with-sysmon` | +| [T1055.003](https://attack.mitre.org/techniques/T1055/003/) | `detecting-process-hollowing-technique`, `detecting-process-injection-techniques`, `detecting-t1055-process-injection-with-sysmon`, `performing-purple-team-atomic-testing` | +| [T1055.004](https://attack.mitre.org/techniques/T1055/004/) | `detecting-process-hollowing-technique`, `detecting-process-injection-techniques`, `detecting-t1055-process-injection-with-sysmon`, `hunting-for-process-injection-techniques` | +| [T1055.005](https://attack.mitre.org/techniques/T1055/005/) | `detecting-process-injection-techniques`, `detecting-t1055-process-injection-with-sysmon` | +| [T1055.008](https://attack.mitre.org/techniques/T1055/008/) | `detecting-process-injection-techniques` | +| [T1055.009](https://attack.mitre.org/techniques/T1055/009/) | `detecting-process-injection-techniques` | +| [T1055.011](https://attack.mitre.org/techniques/T1055/011/) | `detecting-process-injection-techniques` | +| [T1055.012](https://attack.mitre.org/techniques/T1055/012/) | `conducting-malware-incident-response`, `detecting-fileless-malware-techniques`, `detecting-process-hollowing-technique`, `detecting-process-injection-techniques`, `detecting-t1055-process-injection-with-sysmon` +2 more | +| [T1055.013](https://attack.mitre.org/techniques/T1055/013/) | `detecting-process-hollowing-technique`, `detecting-process-injection-techniques`, `detecting-t1055-process-injection-with-sysmon` | +| [T1055.014](https://attack.mitre.org/techniques/T1055/014/) | `detecting-process-injection-techniques` | +| [T1055.015](https://attack.mitre.org/techniques/T1055/015/) | 
`detecting-process-injection-techniques`, `detecting-t1055-process-injection-with-sysmon` | +| [T1070](https://attack.mitre.org/techniques/T1070/) | `detecting-evasion-techniques-in-endpoint-logs`, `implementing-siem-use-cases-for-detection`, `implementing-velociraptor-for-ir-collection`, `performing-purple-team-atomic-testing` | +| [T1070.001](https://attack.mitre.org/techniques/T1070/001/) | `detecting-evasion-techniques-in-endpoint-logs`, `implementing-mitre-attack-coverage-mapping`, `performing-purple-team-atomic-testing`, `performing-purple-team-exercise` | +| [T1070.004](https://attack.mitre.org/techniques/T1070/004/) | `implementing-threat-modeling-with-mitre-attack`, `performing-purple-team-atomic-testing` | +| [T1070.006](https://attack.mitre.org/techniques/T1070/006/) | `detecting-evasion-techniques-in-endpoint-logs`, `hunting-for-defense-evasion-via-timestomping` | +| [T1112](https://attack.mitre.org/techniques/T1112/) | `detecting-fileless-malware-techniques`, `performing-purple-team-atomic-testing` | +| [T1127](https://attack.mitre.org/techniques/T1127/) | `detecting-evasion-techniques-in-endpoint-logs`, `detecting-living-off-the-land-with-lolbas`, `hunting-for-lolbins-execution-in-endpoint-logs` | +| [T1127.001](https://attack.mitre.org/techniques/T1127/001/) | `detecting-living-off-the-land-attacks`, `detecting-living-off-the-land-with-lolbas`, `hunting-for-lolbins-execution-in-endpoint-logs` | +| [T1140](https://attack.mitre.org/techniques/T1140/) | `analyzing-powershell-script-block-logging`, `detecting-fileless-attacks-on-endpoints`, `detecting-living-off-the-land-with-lolbas`, `hunting-for-living-off-the-land-binaries`, `hunting-for-lolbins-execution-in-endpoint-logs` +1 more | +| [T1202](https://attack.mitre.org/techniques/T1202/) | `hunting-for-living-off-the-land-binaries`, `hunting-for-lolbins-execution-in-endpoint-logs` | +| [T1218](https://attack.mitre.org/techniques/T1218/) | `detecting-evasion-techniques-in-endpoint-logs`, 
`detecting-living-off-the-land-attacks`, `detecting-living-off-the-land-with-lolbas`, `hunting-advanced-persistent-threats`, `hunting-for-living-off-the-land-binaries` +3 more | +| [T1218.001](https://attack.mitre.org/techniques/T1218/001/) | `hunting-for-living-off-the-land-binaries`, `hunting-for-lolbins-execution-in-endpoint-logs`, `performing-purple-team-atomic-testing` | +| [T1218.002](https://attack.mitre.org/techniques/T1218/002/) | `hunting-for-living-off-the-land-binaries` | +| [T1218.003](https://attack.mitre.org/techniques/T1218/003/) | `detecting-living-off-the-land-attacks`, `detecting-living-off-the-land-with-lolbas`, `hunting-for-living-off-the-land-binaries`, `hunting-for-lolbins-execution-in-endpoint-logs`, `performing-purple-team-atomic-testing` | +| [T1218.004](https://attack.mitre.org/techniques/T1218/004/) | `detecting-living-off-the-land-attacks`, `hunting-for-lolbins-execution-in-endpoint-logs` | +| [T1218.005](https://attack.mitre.org/techniques/T1218/005/) | `detecting-fileless-malware-techniques`, `detecting-living-off-the-land-attacks`, `detecting-living-off-the-land-with-lolbas`, `hunting-for-living-off-the-land-binaries`, `hunting-for-lolbins-execution-in-endpoint-logs` +1 more | +| [T1218.007](https://attack.mitre.org/techniques/T1218/007/) | `hunting-for-living-off-the-land-binaries`, `hunting-for-lolbins-execution-in-endpoint-logs` | +| [T1218.010](https://attack.mitre.org/techniques/T1218/010/) | `detecting-living-off-the-land-attacks`, `detecting-living-off-the-land-with-lolbas`, `hunting-for-living-off-the-land-binaries`, `hunting-for-lolbins-execution-in-endpoint-logs`, `performing-purple-team-atomic-testing` | +| [T1218.011](https://attack.mitre.org/techniques/T1218/011/) | `detecting-living-off-the-land-attacks`, `detecting-living-off-the-land-with-lolbas`, `hunting-for-living-off-the-land-binaries`, `hunting-for-lolbins-execution-in-endpoint-logs`, `performing-dynamic-analysis-with-any-run` +1 more | +| 
[T1218.013](https://attack.mitre.org/techniques/T1218/013/) | `detecting-living-off-the-land-attacks` | +| [T1222.001](https://attack.mitre.org/techniques/T1222/001/) | `conducting-domain-persistence-with-dcsync` | +| [T1497](https://attack.mitre.org/techniques/T1497/) | `analyzing-malware-sandbox-evasion-techniques` | +| [T1497.001](https://attack.mitre.org/techniques/T1497/001/) | `analyzing-malware-sandbox-evasion-techniques` | +| [T1497.002](https://attack.mitre.org/techniques/T1497/002/) | `analyzing-malware-sandbox-evasion-techniques` | +| [T1497.003](https://attack.mitre.org/techniques/T1497/003/) | `analyzing-malware-sandbox-evasion-techniques` | +| [T1550](https://attack.mitre.org/techniques/T1550/) | `performing-lateral-movement-detection` | +| [T1550.001](https://attack.mitre.org/techniques/T1550/001/) | `detecting-azure-lateral-movement` | +| [T1550.002](https://attack.mitre.org/techniques/T1550/002/) | `analyzing-windows-event-logs-in-splunk`, `building-attack-pattern-library-from-cti-reports`, `conducting-full-scope-red-team-engagement`, `detecting-lateral-movement-in-network`, `detecting-lateral-movement-with-splunk` +6 more | +| [T1550.003](https://attack.mitre.org/techniques/T1550/003/) | `conducting-pass-the-ticket-attack`, `detecting-pass-the-hash-attacks`, `detecting-pass-the-ticket-attacks`, `exploiting-constrained-delegation-abuse` | +| [T1550.004](https://attack.mitre.org/techniques/T1550/004/) | `performing-initial-access-with-evilginx3` | +| [T1562](https://attack.mitre.org/techniques/T1562/) | `detecting-evasion-techniques-in-endpoint-logs`, `performing-purple-team-atomic-testing` | +| [T1562.001](https://attack.mitre.org/techniques/T1562/001/) | `analyzing-powershell-script-block-logging`, `building-attack-pattern-library-from-cti-reports`, `detecting-evasion-techniques-in-endpoint-logs`, `detecting-fileless-attacks-on-endpoints`, `detecting-suspicious-powershell-execution` +1 more | +| [T1610](https://attack.mitre.org/techniques/T1610/) 
| `detecting-container-escape-attempts`, `detecting-container-escape-with-falco-rules` | + +--- + +## 🔑 Credential Access + +**27 techniques covered** + +| Technique | Skills | +|:----------|:-------| +| [T1003](https://attack.mitre.org/techniques/T1003/) | `analyzing-powershell-script-block-logging`, `building-attack-pattern-library-from-cti-reports`, `building-detection-rules-with-sigma`, `detecting-container-escape-with-falco-rules`, `detecting-credential-dumping-techniques` +10 more | +| [T1003.001](https://attack.mitre.org/techniques/T1003/001/) | `analyzing-campaign-attribution-evidence`, `analyzing-powershell-script-block-logging`, `analyzing-windows-event-logs-in-splunk`, `building-attack-pattern-library-from-cti-reports`, `building-detection-rule-with-splunk-spl` +13 more | +| [T1003.002](https://attack.mitre.org/techniques/T1003/002/) | `detecting-credential-dumping-techniques`, `detecting-t1003-credential-dumping-with-edr`, `performing-purple-team-atomic-testing` | +| [T1003.003](https://attack.mitre.org/techniques/T1003/003/) | `detecting-credential-dumping-techniques`, `detecting-t1003-credential-dumping-with-edr`, `performing-purple-team-atomic-testing` | +| [T1003.004](https://attack.mitre.org/techniques/T1003/004/) | `detecting-t1003-credential-dumping-with-edr`, `performing-credential-access-with-lazagne`, `performing-purple-team-atomic-testing` | +| [T1003.005](https://attack.mitre.org/techniques/T1003/005/) | `detecting-t1003-credential-dumping-with-edr`, `performing-purple-team-atomic-testing` | +| [T1003.006](https://attack.mitre.org/techniques/T1003/006/) | `analyzing-windows-event-logs-in-splunk`, `conducting-domain-persistence-with-dcsync`, `conducting-full-scope-red-team-engagement`, `conducting-internal-network-penetration-test`, `detecting-dcsync-attack-in-active-directory` +8 more | +| [T1110](https://attack.mitre.org/techniques/T1110/) | `analyzing-windows-event-logs-in-splunk`, `building-detection-rule-with-splunk-spl`, 
`conducting-internal-network-penetration-test`, `implementing-mitre-attack-coverage-mapping`, `implementing-siem-use-cases-for-detection` +3 more | +| [T1110.001](https://attack.mitre.org/techniques/T1110/001/) | `analyzing-windows-event-logs-in-splunk`, `building-detection-rule-with-splunk-spl`, `implementing-siem-use-cases-for-detection`, `performing-false-positive-reduction-in-siem`, `performing-purple-team-atomic-testing` | +| [T1110.002](https://attack.mitre.org/techniques/T1110/002/) | `exploiting-kerberoasting-with-impacket` | +| [T1110.003](https://attack.mitre.org/techniques/T1110/003/) | `detecting-pass-the-ticket-attacks`, `implementing-siem-use-cases-for-detection`, `performing-purple-team-atomic-testing` | +| [T1187](https://attack.mitre.org/techniques/T1187/) | `detecting-ntlm-relay-with-event-correlation` | +| [T1528](https://attack.mitre.org/techniques/T1528/) | `detecting-azure-lateral-movement`, `detecting-azure-service-principal-abuse` | +| [T1539](https://attack.mitre.org/techniques/T1539/) | `performing-credential-access-with-lazagne`, `performing-initial-access-with-evilginx3` | +| [T1552](https://attack.mitre.org/techniques/T1552/) | `performing-cloud-incident-containment-procedures`, `performing-purple-team-atomic-testing` | +| [T1552.001](https://attack.mitre.org/techniques/T1552/001/) | `performing-credential-access-with-lazagne`, `performing-purple-team-atomic-testing` | +| [T1552.002](https://attack.mitre.org/techniques/T1552/002/) | `performing-credential-access-with-lazagne` | +| [T1552.005](https://attack.mitre.org/techniques/T1552/005/) | `conducting-cloud-penetration-testing` | +| [T1552.006](https://attack.mitre.org/techniques/T1552/006/) | `deploying-active-directory-honeytokens` | +| [T1557](https://attack.mitre.org/techniques/T1557/) | `performing-initial-access-with-evilginx3` | +| [T1557.001](https://attack.mitre.org/techniques/T1557/001/) | `conducting-internal-network-penetration-test`, 
`detecting-ntlm-relay-with-event-correlation`, `hunting-for-ntlm-relay-attacks` | +| [T1558](https://attack.mitre.org/techniques/T1558/) | `analyzing-windows-event-logs-in-splunk`, `conducting-pass-the-ticket-attack`, `exploiting-kerberoasting-with-impacket`, `exploiting-nopac-cve-2021-42278-42287`, `performing-lateral-movement-detection` +1 more | +| [T1558.001](https://attack.mitre.org/techniques/T1558/001/) | `analyzing-windows-event-logs-in-splunk`, `conducting-domain-persistence-with-dcsync`, `detecting-golden-ticket-attacks-in-kerberos-logs`, `detecting-golden-ticket-forgery`, `detecting-kerberoasting-attacks` +3 more | +| [T1558.002](https://attack.mitre.org/techniques/T1558/002/) | `performing-active-directory-compromise-investigation` | +| [T1558.003](https://attack.mitre.org/techniques/T1558/003/) | `analyzing-windows-event-logs-in-splunk`, `building-attack-pattern-library-from-cti-reports`, `conducting-full-scope-red-team-engagement`, `conducting-internal-network-penetration-test`, `deploying-active-directory-honeytokens` +12 more | +| [T1558.004](https://attack.mitre.org/techniques/T1558/004/) | `detecting-kerberoasting-attacks` | +| [T1649](https://attack.mitre.org/techniques/T1649/) | `exploiting-active-directory-certificate-services-esc1` | + +--- + +## 🗺️ Discovery + +**20 techniques covered** + +| Technique | Skills | +|:----------|:-------| +| [T1016](https://attack.mitre.org/techniques/T1016/) | `conducting-full-scope-red-team-engagement`, `conducting-internal-reconnaissance-with-bloodhound-ce`, `exploiting-active-directory-with-bloodhound`, `performing-purple-team-atomic-testing` | +| [T1018](https://attack.mitre.org/techniques/T1018/) | `conducting-full-scope-red-team-engagement`, `conducting-internal-reconnaissance-with-bloodhound-ce`, `detecting-network-scanning-with-ids-signatures`, `exploiting-active-directory-with-bloodhound`, `performing-active-directory-bloodhound-analysis` | +| [T1033](https://attack.mitre.org/techniques/T1033/) | 
`conducting-internal-reconnaissance-with-bloodhound-ce`, `detecting-privilege-escalation-attempts`, `exploiting-active-directory-with-bloodhound`, `performing-purple-team-atomic-testing` | +| [T1040](https://attack.mitre.org/techniques/T1040/) | `implementing-continuous-security-validation-with-bas` | +| [T1046](https://attack.mitre.org/techniques/T1046/) | `detecting-network-scanning-with-ids-signatures`, `detecting-privilege-escalation-attempts`, `performing-packet-injection-attack`, `triaging-security-incident` | +| [T1049](https://attack.mitre.org/techniques/T1049/) | `performing-purple-team-atomic-testing` | +| [T1057](https://attack.mitre.org/techniques/T1057/) | `performing-purple-team-atomic-testing` | +| [T1069](https://attack.mitre.org/techniques/T1069/) | `performing-purple-team-atomic-testing` | +| [T1069.001](https://attack.mitre.org/techniques/T1069/001/) | `performing-active-directory-bloodhound-analysis`, `performing-purple-team-atomic-testing` | +| [T1069.002](https://attack.mitre.org/techniques/T1069/002/) | `conducting-internal-reconnaissance-with-bloodhound-ce`, `exploiting-active-directory-with-bloodhound`, `performing-active-directory-bloodhound-analysis`, `performing-kerberoasting-attack`, `performing-purple-team-atomic-testing` | +| [T1082](https://attack.mitre.org/techniques/T1082/) | `conducting-full-scope-red-team-engagement`, `performing-purple-team-atomic-testing` | +| [T1083](https://attack.mitre.org/techniques/T1083/) | `implementing-canary-tokens-for-network-intrusion`, `performing-purple-team-atomic-testing` | +| [T1087](https://attack.mitre.org/techniques/T1087/) | `conducting-full-scope-red-team-engagement`, `executing-red-team-engagement-planning`, `implementing-continuous-security-validation-with-bas`, `performing-purple-team-atomic-testing` | +| [T1087.001](https://attack.mitre.org/techniques/T1087/001/) | `performing-purple-team-atomic-testing` | +| [T1087.002](https://attack.mitre.org/techniques/T1087/002/) | 
`conducting-internal-reconnaissance-with-bloodhound-ce`, `deploying-active-directory-honeytokens`, `exploiting-active-directory-certificate-services-esc1`, `exploiting-active-directory-with-bloodhound`, `exploiting-kerberoasting-with-impacket` +3 more | +| [T1087.004](https://attack.mitre.org/techniques/T1087/004/) | `detecting-azure-service-principal-abuse`, `implementing-mitre-attack-coverage-mapping` | +| [T1482](https://attack.mitre.org/techniques/T1482/) | `conducting-internal-reconnaissance-with-bloodhound-ce`, `exploiting-active-directory-with-bloodhound`, `performing-active-directory-bloodhound-analysis` | +| [T1518](https://attack.mitre.org/techniques/T1518/) | `performing-purple-team-atomic-testing` | +| [T1518.001](https://attack.mitre.org/techniques/T1518/001/) | `performing-purple-team-atomic-testing` | +| [T1580](https://attack.mitre.org/techniques/T1580/) | `implementing-mitre-attack-coverage-mapping` | + +--- + +## ↔️ Lateral Movement + +**9 techniques covered** + +| Technique | Skills | +|:----------|:-------| +| [T1021](https://attack.mitre.org/techniques/T1021/) | `detecting-lateral-movement-in-network`, `detecting-lateral-movement-with-splunk`, `detecting-service-account-abuse`, `executing-red-team-engagement-planning`, `exploiting-constrained-delegation-abuse` +10 more | +| [T1021.001](https://attack.mitre.org/techniques/T1021/001/) | `analyzing-campaign-attribution-evidence`, `analyzing-windows-event-logs-in-splunk`, `building-attack-pattern-library-from-cti-reports`, `building-detection-rule-with-splunk-spl`, `building-threat-hunt-hypothesis-framework` +8 more | +| [T1021.002](https://attack.mitre.org/techniques/T1021/002/) | `analyzing-windows-event-logs-in-splunk`, `building-attack-pattern-library-from-cti-reports`, `building-detection-rule-with-splunk-spl`, `conducting-full-scope-red-team-engagement`, `conducting-internal-network-penetration-test` +10 more | +| [T1021.003](https://attack.mitre.org/techniques/T1021/003/) | 
`detecting-lateral-movement-with-splunk`, `hunting-for-dcom-lateral-movement`, `performing-lateral-movement-detection`, `performing-lateral-movement-with-wmiexec`, `performing-purple-team-atomic-testing` | +| [T1021.004](https://attack.mitre.org/techniques/T1021/004/) | `detecting-lateral-movement-with-splunk`, `performing-purple-team-atomic-testing` | +| [T1021.006](https://attack.mitre.org/techniques/T1021/006/) | `building-attack-pattern-library-from-cti-reports`, `detecting-lateral-movement-with-splunk`, `performing-lateral-movement-detection`, `performing-purple-team-atomic-testing` | +| [T1210](https://attack.mitre.org/techniques/T1210/) | `exploiting-ms17-010-eternalblue-vulnerability`, `exploiting-zerologon-vulnerability-cve-2020-1472` | +| [T1534](https://attack.mitre.org/techniques/T1534/) | `implementing-mitre-attack-coverage-mapping` | +| [T1570](https://attack.mitre.org/techniques/T1570/) | `detecting-lateral-movement-in-network`, `detecting-lateral-movement-with-splunk`, `performing-lateral-movement-with-wmiexec`, `performing-purple-team-atomic-testing` | + +--- + +## 📦 Collection + +**13 techniques covered** + +| Technique | Skills | +|:----------|:-------| +| [T1005](https://attack.mitre.org/techniques/T1005/) | `conducting-malware-incident-response`, `detecting-container-escape-with-falco-rules`, `performing-purple-team-atomic-testing` | +| [T1039](https://attack.mitre.org/techniques/T1039/) | `performing-purple-team-atomic-testing` | +| [T1074](https://attack.mitre.org/techniques/T1074/) | `building-attack-pattern-library-from-cti-reports`, `executing-red-team-exercise`, `hunting-for-data-staging-before-exfiltration` | +| [T1074.001](https://attack.mitre.org/techniques/T1074/001/) | `hunting-for-data-staging-before-exfiltration`, `performing-purple-team-atomic-testing` | +| [T1074.002](https://attack.mitre.org/techniques/T1074/002/) | `hunting-for-data-staging-before-exfiltration` | +| [T1113](https://attack.mitre.org/techniques/T1113/) | 
`performing-purple-team-atomic-testing` | +| [T1114.002](https://attack.mitre.org/techniques/T1114/002/) | `detecting-email-forwarding-rules-attack` | +| [T1114.003](https://attack.mitre.org/techniques/T1114/003/) | `detecting-business-email-compromise`, `detecting-email-forwarding-rules-attack` | +| [T1115](https://attack.mitre.org/techniques/T1115/) | `performing-purple-team-atomic-testing` | +| [T1213](https://attack.mitre.org/techniques/T1213/) | `conducting-full-scope-red-team-engagement` | +| [T1530](https://attack.mitre.org/techniques/T1530/) | `detecting-insider-threat-behaviors`, `implementing-mitre-attack-coverage-mapping`, `performing-cloud-incident-containment-procedures` | +| [T1560](https://attack.mitre.org/techniques/T1560/) | `conducting-full-scope-red-team-engagement`, `hunting-for-data-staging-before-exfiltration` | +| [T1560.001](https://attack.mitre.org/techniques/T1560/001/) | `hunting-for-data-staging-before-exfiltration`, `performing-purple-team-atomic-testing` | + +--- + +## 📡 Command and Control + +**20 techniques covered** + +| Technique | Skills | +|:----------|:-------| +| [T1071](https://attack.mitre.org/techniques/T1071/) | `analyzing-apt-group-with-mitre-navigator`, `analyzing-network-covert-channels-in-malware`, `analyzing-ransomware-network-indicators`, `analyzing-threat-actor-ttps-with-mitre-attack`, `hunting-advanced-persistent-threats` +6 more | +| [T1071.001](https://attack.mitre.org/techniques/T1071/001/) | `analyzing-apt-group-with-mitre-navigator`, `analyzing-campaign-attribution-evidence`, `analyzing-powershell-empire-artifacts`, `analyzing-powershell-script-block-logging`, `building-attack-pattern-library-from-cti-reports` +13 more | +| [T1071.004](https://attack.mitre.org/techniques/T1071/004/) | `building-attack-pattern-library-from-cti-reports`, `building-c2-infrastructure-with-sliver-framework`, `hunting-for-beaconing-with-frequency-analysis`, `hunting-for-command-and-control-beaconing`, 
`hunting-for-dns-tunneling-with-zeek` +3 more | +| [T1090](https://attack.mitre.org/techniques/T1090/) | `implementing-mitre-attack-coverage-mapping`, `performing-purple-team-atomic-testing` | +| [T1090.001](https://attack.mitre.org/techniques/T1090/001/) | `performing-purple-team-atomic-testing` | +| [T1090.002](https://attack.mitre.org/techniques/T1090/002/) | `building-c2-infrastructure-with-sliver-framework`, `building-red-team-c2-infrastructure-with-havoc` | +| [T1090.004](https://attack.mitre.org/techniques/T1090/004/) | `hunting-for-domain-fronting-c2-traffic` | +| [T1095](https://attack.mitre.org/techniques/T1095/) | `hunting-for-command-and-control-beaconing`, `hunting-for-unusual-network-connections` | +| [T1102](https://attack.mitre.org/techniques/T1102/) | `hunting-for-living-off-the-cloud-techniques` | +| [T1105](https://attack.mitre.org/techniques/T1105/) | `analyzing-powershell-script-block-logging`, `building-attack-pattern-library-from-cti-reports`, `building-c2-infrastructure-with-sliver-framework`, `building-red-team-c2-infrastructure-with-havoc`, `detecting-fileless-attacks-on-endpoints` +7 more | +| [T1132](https://attack.mitre.org/techniques/T1132/) | `hunting-for-command-and-control-beaconing`, `performing-purple-team-atomic-testing` | +| [T1132.001](https://attack.mitre.org/techniques/T1132/001/) | `building-c2-infrastructure-with-sliver-framework`, `performing-purple-team-atomic-testing` | +| [T1219](https://attack.mitre.org/techniques/T1219/) | `performing-purple-team-atomic-testing` | +| [T1568](https://attack.mitre.org/techniques/T1568/) | `hunting-for-command-and-control-beaconing`, `implementing-mitre-attack-coverage-mapping` | +| [T1568.002](https://attack.mitre.org/techniques/T1568/002/) | `hunting-for-beaconing-with-frequency-analysis` | +| [T1571](https://attack.mitre.org/techniques/T1571/) | `hunting-for-unusual-network-connections`, `implementing-mitre-attack-coverage-mapping` | +| 
[T1572](https://attack.mitre.org/techniques/T1572/) | `building-c2-infrastructure-with-sliver-framework`, `hunting-for-command-and-control-beaconing`, `hunting-for-dns-tunneling-with-zeek`, `implementing-mitre-attack-coverage-mapping` | +| [T1573](https://attack.mitre.org/techniques/T1573/) | `analyzing-ransomware-network-indicators`, `hunting-for-beaconing-with-frequency-analysis`, `hunting-for-command-and-control-beaconing`, `implementing-mitre-attack-coverage-mapping`, `performing-purple-team-atomic-testing` | +| [T1573.001](https://attack.mitre.org/techniques/T1573/001/) | `performing-purple-team-atomic-testing` | +| [T1573.002](https://attack.mitre.org/techniques/T1573/002/) | `building-c2-infrastructure-with-sliver-framework`, `building-red-team-c2-infrastructure-with-havoc` | + +--- + +## 📤 Exfiltration + +**12 techniques covered** + +| Technique | Skills | +|:----------|:-------| +| [T1020](https://attack.mitre.org/techniques/T1020/) | `hunting-for-data-exfiltration-indicators` | +| [T1029](https://attack.mitre.org/techniques/T1029/) | `hunting-for-data-exfiltration-indicators` | +| [T1030](https://attack.mitre.org/techniques/T1030/) | `hunting-for-data-exfiltration-indicators` | +| [T1041](https://attack.mitre.org/techniques/T1041/) | `analyzing-campaign-attribution-evidence`, `analyzing-ransomware-network-indicators`, `building-attack-pattern-library-from-cti-reports`, `conducting-full-scope-red-team-engagement`, `conducting-malware-incident-response` +6 more | +| [T1048](https://attack.mitre.org/techniques/T1048/) | `building-attack-pattern-library-from-cti-reports`, `building-detection-rule-with-splunk-spl`, `conducting-full-scope-red-team-engagement`, `hunting-for-data-exfiltration-indicators`, `implementing-continuous-security-validation-with-bas` +2 more | +| [T1048.001](https://attack.mitre.org/techniques/T1048/001/) | `hunting-for-data-exfiltration-indicators` | +| [T1048.002](https://attack.mitre.org/techniques/T1048/002/) | 
`hunting-for-data-exfiltration-indicators` | +| [T1048.003](https://attack.mitre.org/techniques/T1048/003/) | `conducting-full-scope-red-team-engagement`, `hunting-for-data-exfiltration-indicators`, `hunting-for-dns-tunneling-with-zeek`, `implementing-continuous-security-validation-with-bas`, `implementing-mitre-attack-coverage-mapping` +2 more | +| [T1052](https://attack.mitre.org/techniques/T1052/) | `hunting-for-data-exfiltration-indicators` | +| [T1537](https://attack.mitre.org/techniques/T1537/) | `hunting-for-data-exfiltration-indicators`, `hunting-for-living-off-the-cloud-techniques`, `implementing-mitre-attack-coverage-mapping`, `implementing-threat-modeling-with-mitre-attack`, `performing-cloud-incident-containment-procedures` | +| [T1567](https://attack.mitre.org/techniques/T1567/) | `detecting-insider-threat-behaviors`, `hunting-for-data-exfiltration-indicators`, `hunting-for-living-off-the-cloud-techniques`, `implementing-continuous-security-validation-with-bas`, `performing-purple-team-atomic-testing` | +| [T1567.002](https://attack.mitre.org/techniques/T1567/002/) | `hunting-for-data-exfiltration-indicators`, `performing-purple-team-atomic-testing` | + +--- + +## 💥 Impact + +**6 techniques covered** + +| Technique | Skills | +|:----------|:-------| +| [T1485](https://attack.mitre.org/techniques/T1485/) | `hunting-for-shadow-copy-deletion`, `performing-purple-team-atomic-testing` | +| [T1486](https://attack.mitre.org/techniques/T1486/) | `analyzing-ransomware-network-indicators`, `building-attack-pattern-library-from-cti-reports`, `building-threat-hunt-hypothesis-framework`, `conducting-full-scope-red-team-engagement`, `hunting-for-shadow-copy-deletion` +7 more | +| [T1489](https://attack.mitre.org/techniques/T1489/) | `conducting-full-scope-red-team-engagement`, `performing-purple-team-atomic-testing` | +| [T1490](https://attack.mitre.org/techniques/T1490/) | `building-soc-playbook-for-ransomware`, `hunting-for-shadow-copy-deletion`, 
`performing-purple-team-atomic-testing`, `performing-purple-team-exercise` | +| [T1491](https://attack.mitre.org/techniques/T1491/) | `performing-purple-team-atomic-testing` | +| [T1491.002](https://attack.mitre.org/techniques/T1491/002/) | `performing-purple-team-atomic-testing` | + +--- + +## 🔧 Other / Cross-Tactic Techniques + +| Technique | Skills | +|:----------|:-------| +| T0157 | `exploiting-kerberoasting-with-impacket` | +| T0200 | `building-vulnerability-scanning-workflow`, `performing-authenticated-scan-with-openvas` | +| T0802 | `detecting-attacks-on-historian-servers` | +| T0809 | `detecting-attacks-on-historian-servers` | +| T0814 | `detecting-modbus-command-injection-attacks` | +| T0816 | `detecting-dnp3-protocol-anomalies` | +| T0830 | `detecting-modbus-protocol-anomalies` | +| T0831 | `detecting-modbus-protocol-anomalies` | +| T0832 | `detecting-attacks-on-historian-servers` | +| T0833 | `detecting-stuxnet-style-attacks` | +| T0836 | `detecting-modbus-command-injection-attacks`, `detecting-modbus-protocol-anomalies`, `detecting-stuxnet-style-attacks` | +| T0839 | `detecting-dnp3-protocol-anomalies`, `detecting-stuxnet-style-attacks` | +| T0843 | `detecting-modbus-command-injection-attacks`, `performing-s7comm-protocol-security-analysis` | +| T0847 | `detecting-stuxnet-style-attacks` | +| T0855 | `detecting-dnp3-protocol-anomalies`, `detecting-modbus-command-injection-attacks`, `detecting-modbus-protocol-anomalies` | +| T0856 | `detecting-stuxnet-style-attacks` | +| T0862 | `detecting-stuxnet-style-attacks` | +| T0866 | `detecting-stuxnet-style-attacks` | +| T0869 | `detecting-dnp3-protocol-anomalies` | +| T0881 | `performing-s7comm-protocol-security-analysis` | +| T0886 | `detecting-modbus-protocol-anomalies` | +| T1404 | `analyzing-android-malware-with-apktool` | +| T1417 | `analyzing-android-malware-with-apktool` | +| T1418 | `analyzing-android-malware-with-apktool` | +| T1553.006 | `analyzing-uefi-bootkit-persistence` | +| T1555 | 
`performing-credential-access-with-lazagne`, `performing-purple-team-atomic-testing` | +| T1555.003 | `performing-credential-access-with-lazagne`, `performing-purple-team-atomic-testing` | +| T1555.004 | `performing-credential-access-with-lazagne` | +| T1578 | `performing-cloud-incident-containment-procedures` | +| T1582 | `analyzing-android-malware-with-apktool` | +| T1611 | `detecting-container-escape-attempts`, `detecting-container-escape-with-falco-rules` | +| T1615 | `conducting-internal-reconnaissance-with-bloodhound-ce`, `exploiting-active-directory-with-bloodhound`, `performing-active-directory-bloodhound-analysis` | +| T1620 | `detecting-fileless-attacks-on-endpoints` | +| T5577 | `performing-physical-intrusion-assessment` | + +--- + +## How This Was Generated + +This coverage map was automatically generated by scanning all 753+ SKILL.md and agent.py files for MITRE ATT&CK technique IDs (pattern: `T####` and `T####.###`). Each technique was mapped to its parent tactic using the [MITRE ATT&CK Enterprise Matrix v16](https://attack.mitre.org/matrices/enterprise/). + +To regenerate: `python3 extract_attack.py` + +--- + +## MITRE ATLAS Coverage (v5.5.0) + +81 skills mapped to ATLAS adversarial ML techniques. + +Key techniques applied: +- AML.T0051 — LLM Prompt Injection (Execution) +- AML.T0054 — LLM Jailbreak (Privilege Escalation) +- AML.T0088 — Generate Deepfakes (AI Attack Staging) +- AML.T0010 — AI Supply Chain Compromise (Initial Access) +- AML.T0020 — Poison Training Data (Resource Development) +- AML.T0070 — RAG Poisoning (Persistence) +- AML.T0080 — AI Agent Context Poisoning (Persistence) +- AML.T0056 — Extract LLM System Prompt (Exfiltration) + +## MITRE D3FEND Coverage (v1.3) + +11 skills mapped to D3FEND defensive countermeasures. + +Countermeasures applied span D3FEND tactical categories: +Harden, Detect, Isolate, Deceive, Evict, Restore. 
+Each skill's d3fend_techniques field lists the top 5 most relevant +defensive countermeasures derived from the skill's ATT&CK technique tags. + +## NIST AI RMF Coverage (AI 100-1) + +85 skills mapped to NIST AI Risk Management Framework subcategories. + +Core functions covered: +- GOVERN: Organizational accountability for AI risk (GOVERN-1.1, GOVERN-6.1, GOVERN-6.2) +- MAP: AI risk identification and context (MAP-5.1, MAP-5.2, MAP-1.6) +- MEASURE: AI risk analysis and evaluation (MEASURE-2.5, MEASURE-2.7, MEASURE-2.8, MEASURE-2.11) +- MANAGE: AI risk response and recovery (MANAGE-2.4, MANAGE-3.1) + +GenAI-specific subcategories applied: GOVERN-6.1, GOVERN-6.2 (responsible deployment policies). + +--- + +

+ Part of Anthropic Cybersecurity Skills — 753+ open-source cybersecurity skills for AI agents +

\ No newline at end of file diff --git a/personas/_shared/anthropic-cybersecurity-skills/CITATION.cff b/personas/_shared/anthropic-cybersecurity-skills/CITATION.cff new file mode 100644 index 0000000..807b051 --- /dev/null +++ b/personas/_shared/anthropic-cybersecurity-skills/CITATION.cff @@ -0,0 +1,32 @@ +cff-version: 1.2.0 +message: "If you use this repository in your research, tools, or publications, please cite it as below." +type: software +title: "Anthropic-Cybersecurity-Skills" +abstract: > + A structured collection of 753 cybersecurity skills for AI agents, covering + penetration testing, digital forensics, threat intelligence, incident response, + cloud security, OT/SCADA security, AI security, and more. Each skill follows + a standardized format with YAML frontmatter metadata, step-by-step procedures, + tool commands, expected outputs, and MITRE ATT&CK mappings. Compatible with + Claude Code, GitHub Copilot, Cursor, Windsurf, Gemini CLI, and 20+ AI agent + platforms. +authors: + - name: "Mahipal" + email: mukuljangra5@gmail.com + alias: mukul975 +repository-code: "https://github.com/mukul975/Anthropic-Cybersecurity-Skills" +url: "https://github.com/mukul975/Anthropic-Cybersecurity-Skills" +license: Apache-2.0 +version: "1.1.0" +date-released: "2026-03-21" +keywords: + - cybersecurity + - AI agents + - skills + - penetration testing + - digital forensics + - threat intelligence + - incident response + - MITRE ATT&CK + - Claude Code + - open source diff --git a/personas/_shared/anthropic-cybersecurity-skills/CODE_OF_CONDUCT.md b/personas/_shared/anthropic-cybersecurity-skills/CODE_OF_CONDUCT.md new file mode 100644 index 0000000..90a4a45 --- /dev/null +++ b/personas/_shared/anthropic-cybersecurity-skills/CODE_OF_CONDUCT.md @@ -0,0 +1,83 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +We as members, contributors, and leaders pledge to make participation in our community a harassment-free experience for everyone, regardless of age, body size, 
visible or invisible disability, ethnicity, sex characteristics, gender identity and expression, level of experience, education, socio-economic status, nationality, personal appearance, race, caste, color, religion, or sexual identity and orientation. + +We pledge to act and interact in ways that contribute to an open, welcoming, diverse, inclusive, and healthy community. + +## Our Standards + +Examples of behavior that contributes to a positive environment for our community include: + +* Demonstrating empathy and kindness toward other people +* Being respectful of differing opinions, viewpoints, and experiences +* Giving and gracefully accepting constructive feedback +* Accepting responsibility and apologizing to those affected by our mistakes, and learning from the experience +* Focusing on what is best not just for us as individuals, but for the overall community + +Examples of unacceptable behavior include: + +* The use of sexualized language or imagery, and sexual attention or advances of any kind +* Trolling, insulting or derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or email address, without their explicit permission +* Other conduct which could reasonably be considered inappropriate in a professional setting + +## Enforcement Responsibilities + +Community leaders are responsible for clarifying and enforcing our standards of acceptable behavior and will take appropriate and fair corrective action in response to any behavior that they deem inappropriate, threatening, offensive, or harmful. + +Community leaders have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, and will communicate reasons for moderation decisions when appropriate. 
+ +## Scope + +This Code of Conduct applies within all community spaces, and also applies when an individual is officially representing the community in public spaces. Examples of representing our community include using an official e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be reported to the community leaders responsible for enforcement at mukuljangra5@gmail.com. All complaints will be reviewed and investigated promptly and fairly. + +All community leaders are obligated to respect the privacy and security of the reporter of any incident. + +## Enforcement Guidelines + +Community leaders will follow these Community Impact Guidelines in determining the consequences for any action they deem in violation of this Code of Conduct: + +### 1. Correction + +**Community Impact**: Use of inappropriate language or other behavior deemed unprofessional or unwelcome in the community. + +**Consequence**: A private, written warning from community leaders, providing clarity around the nature of the violation and an explanation of why the behavior was inappropriate. A public apology may be requested. + +### 2. Warning + +**Community Impact**: A violation through a single incident or series of actions. + +**Consequence**: A warning with consequences for continued behavior. No interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, for a specified period of time. This includes avoiding interactions in community spaces as well as external channels like social media. Violating these terms may lead to a temporary or permanent ban. + +### 3. Temporary Ban + +**Community Impact**: A serious violation of community standards, including sustained inappropriate behavior. 
+ +**Consequence**: A temporary ban from any sort of interaction or public communication with the community for a specified period of time. No public or private interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, is allowed during this period. Violating these terms may lead to a permanent ban. + +### 4. Permanent Ban + +**Community Impact**: Demonstrating a pattern of violation of community standards, including sustained inappropriate behavior, harassment of an individual, or aggression toward or disparagement of classes of individuals. + +**Consequence**: A permanent ban from any sort of public interaction within the community. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 2.1, available at [https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1]. + +Community Impact Guidelines were inspired by [Mozilla's code of conduct enforcement ladder][Mozilla CoC]. + +For answers to common questions about this code of conduct, see the FAQ at [https://www.contributor-covenant.org/faq][FAQ]. Translations are available at [https://www.contributor-covenant.org/translations][translations]. + +[homepage]: https://www.contributor-covenant.org +[v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html +[Mozilla CoC]: https://github.com/mozilla/diversity +[FAQ]: https://www.contributor-covenant.org/faq +[translations]: https://www.contributor-covenant.org/translations diff --git a/personas/_shared/anthropic-cybersecurity-skills/CONTRIBUTING.md b/personas/_shared/anthropic-cybersecurity-skills/CONTRIBUTING.md new file mode 100644 index 0000000..11fde20 --- /dev/null +++ b/personas/_shared/anthropic-cybersecurity-skills/CONTRIBUTING.md @@ -0,0 +1,74 @@ +# Contributing to Anthropic-Cybersecurity-Skills + +## How to add a new skill + +1. Create a new directory: `skills/your-skill-name/` +2. 
Add a `SKILL.md` file with required YAML frontmatter: + ```yaml + --- + name: your-skill-name + description: >- + Clear description of what this skill does and when + an AI agent should activate it. Include keywords. + domain: cybersecurity + subdomain: [category] + tags: [tag1, tag2, tag3] + version: "1.0" + author: your-github-username + license: Apache-2.0 + --- + ``` +3. Write clear, step-by-step instructions in the Markdown body using these sections: + - ## When to Use + - ## Prerequisites + - ## Workflow (numbered steps with real commands) + - ## Key Concepts (table) + - ## Tools & Systems + - ## Common Scenarios + - ## Output Format +4. (Optional) Add supporting files: + - `references/standards.md` — Real standard numbers, CVE refs, NIST/MITRE links + - `references/workflows.md` — Deep technical procedure + - `scripts/process.py` — Real working helper script + - `assets/template.md` — Real filled-in checklist/template +5. Submit a PR with title: `Add skill: your-skill-name` + +## Skill quality checklist +- [ ] Name is lowercase with hyphens (kebab-case), 1–64 characters +- [ ] Description is clear and includes agent-discovery keywords +- [ ] Instructions are actionable with real commands and tool names +- [ ] Domain and subdomain are set correctly +- [ ] Tags include relevant tools, frameworks, and techniques + +## Subdomains +Choose the most appropriate subdomain for your skill: +- web-application-security +- network-security +- penetration-testing +- red-teaming +- digital-forensics +- malware-analysis +- threat-intelligence +- cloud-security +- container-security +- identity-access-management +- cryptography +- vulnerability-management +- compliance-governance +- zero-trust-architecture +- ot-ics-security +- devsecops +- soc-operations +- incident-response +- phishing-defense +- ransomware-defense +- api-security +- mobile-security +- endpoint-security +- threat-hunting + +## Code of Conduct +This project follows the [Contributor 
Covenant](CODE_OF_CONDUCT.md). By participating, you agree to uphold this code. + +## License +By contributing, you agree that your contributions will be licensed under Apache-2.0. diff --git a/personas/_shared/anthropic-cybersecurity-skills/LICENSE b/personas/_shared/anthropic-cybersecurity-skills/LICENSE new file mode 100644 index 0000000..d885118 --- /dev/null +++ b/personas/_shared/anthropic-cybersecurity-skills/LICENSE @@ -0,0 +1,201 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to the Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by the Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding any notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. Please do not remove or change + the license header comment from a contributed file except when + necessary. 
+ + Copyright 2026 mukul975 + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/personas/_shared/anthropic-cybersecurity-skills/README.md b/personas/_shared/anthropic-cybersecurity-skills/README.md new file mode 100644 index 0000000..ca75208 --- /dev/null +++ b/personas/_shared/anthropic-cybersecurity-skills/README.md @@ -0,0 +1,358 @@ +

+ Anthropic Cybersecurity Skills +

+ +
+ +# Anthropic Cybersecurity Skills + +### The largest open-source cybersecurity skills library for AI agents + +[![License](https://img.shields.io/badge/License-Apache_2.0-blue.svg?style=flat-square)](LICENSE) +[![Skills](https://img.shields.io/badge/skills-754-brightgreen?style=flat-square)](#whats-inside--26-security-domains) +[![Frameworks](https://img.shields.io/badge/frameworks-5-orange?style=flat-square)](#five-frameworks-one-skill-library) +[![Domains](https://img.shields.io/badge/domains-26-9cf?style=flat-square)](#whats-inside--26-security-domains) +[![Platforms](https://img.shields.io/badge/platforms-26%2B-blueviolet?style=flat-square)](#compatible-platforms) +[![GitHub stars](https://img.shields.io/github/stars/mukul975/Anthropic-Cybersecurity-Skills?style=flat-square)](https://github.com/mukul975/Anthropic-Cybersecurity-Skills/stargazers) +[![GitHub forks](https://img.shields.io/github/forks/mukul975/Anthropic-Cybersecurity-Skills?style=flat-square)](https://github.com/mukul975/Anthropic-Cybersecurity-Skills/network/members) +[![Last Commit](https://img.shields.io/github/last-commit/mukul975/Anthropic-Cybersecurity-Skills?style=flat-square)](https://github.com/mukul975/Anthropic-Cybersecurity-Skills/commits/main) +[![agentskills.io](https://img.shields.io/badge/standard-agentskills.io-ff6600?style=flat-square)](https://agentskills.io) +[![PRs Welcome](https://img.shields.io/badge/PRs-welcome-brightgreen.svg?style=flat-square)](CONTRIBUTING.md) + +**754 production-grade cybersecurity skills · 26 security domains · 5 framework mappings · 26+ AI platforms** + +[Get Started](#quick-start) · [What's Inside](#whats-inside--26-security-domains) · [Frameworks](#five-frameworks-one-skill-library) · [Platforms](#compatible-platforms) · [Contributing](#contributing) + +
+ +--- + +> ⚠️ **Community Project** — This is an independent, community-created project. Not affiliated with Anthropic PBC. + +## Give any AI agent the security skills of a senior analyst + +A junior analyst knows which Volatility3 plugin to run on a suspicious memory dump, which Sigma rules catch Kerberoasting, and how to scope a cloud breach across three providers. **Your AI agent doesn't — unless you give it these skills.** + +This repo contains **754 structured cybersecurity skills** spanning **26 security domains**, each following the [agentskills.io](https://agentskills.io) open standard. Every skill is mapped to **five industry frameworks** — MITRE ATT&CK, NIST CSF 2.0, MITRE ATLAS, MITRE D3FEND, and NIST AI RMF — making this the only open-source skills library with unified cross-framework coverage. Clone it, point your agent at it, and your next security investigation gets expert-level guidance in seconds. + +## Five frameworks, one skill library + +No other open-source skills library maps every skill to all five frameworks. One skill, five compliance checkboxes. 
+
+| Framework | Version | Scope in this repo | What it maps |
+|---|---|---|---|
+| [MITRE ATT&CK](https://attack.mitre.org) | v16 | 14 tactics · 200+ techniques | Adversary behaviors and TTPs |
+| [NIST CSF 2.0](https://www.nist.gov/cyberframework) | 2.0 | 6 functions · 22 categories | Organizational security posture |
+| [MITRE ATLAS](https://atlas.mitre.org) | v5.5.0 | 16 tactics · 84 techniques | AI/ML adversarial threats |
+| [MITRE D3FEND](https://d3fend.mitre.org) | v1.3 | 7 categories · 267 techniques | Defensive countermeasures |
+| [NIST AI RMF](https://airc.nist.gov/AI_RMF) | 1.0 | 4 functions · 72 subcategories | AI risk management |
+
+**Example — a single skill maps across all five:**
+
+| Skill | ATT&CK | NIST CSF | ATLAS | D3FEND | AI RMF |
+|---|---|---|---|---|---|
+| `analyzing-network-traffic-of-malware` | T1071 | DE.CM | AML.T0047 | D3-NTA | MEASURE-2.6 |
+
+## Quick start
+
+```bash
+# Option 1: npx (recommended)
+npx skills add mukul975/Anthropic-Cybersecurity-Skills
+
+# Option 2: Git clone
+git clone https://github.com/mukul975/Anthropic-Cybersecurity-Skills.git
+cd Anthropic-Cybersecurity-Skills
+```
+
+Works immediately with Claude Code, GitHub Copilot, OpenAI Codex CLI, Cursor, Gemini CLI, and any [agentskills.io](https://agentskills.io)-compatible platform.
+
+## Why this exists
+
+The cybersecurity workforce gap hit **4.8 million unfilled roles** globally in 2024 (ISC2). AI agents can help close that gap — but only if they have structured domain knowledge to work from. Today's agents can write code and search the web, but they lack the practitioner playbooks that turn a generic LLM into a capable security analyst.
+
+Existing security tool repos give you wordlists, payloads, or exploit code. None of them give an AI agent the structured decision-making workflow a senior analyst follows: when to use each technique, what prerequisites to check, how to execute step-by-step, and how to verify results. That is the gap this project fills. 
+ +**Anthropic Cybersecurity Skills** is not a collection of scripts or checklists. It is an **AI-native knowledge base** built from the ground up for the agentskills.io standard — YAML frontmatter for sub-second discovery, structured Markdown for step-by-step execution, and reference files for deep technical context. Every skill encodes real practitioner workflows, not generated summaries. + +## What's inside — 26 security domains + +| Domain | Skills | Key capabilities | +|---|---|---| +| Cloud Security | 60 | AWS, Azure, GCP hardening · CSPM · cloud forensics | +| Threat Hunting | 55 | Hypothesis-driven hunts · LOTL detection · behavioral analytics | +| Threat Intelligence | 50 | STIX/TAXII · MISP · feed integration · actor profiling | +| Web Application Security | 42 | OWASP Top 10 · SQLi · XSS · SSRF · deserialization | +| Network Security | 40 | IDS/IPS · firewall rules · VLAN segmentation · traffic analysis | +| Malware Analysis | 39 | Static/dynamic analysis · reverse engineering · sandboxing | +| Digital Forensics | 37 | Disk imaging · memory forensics · timeline reconstruction | +| Security Operations | 36 | SIEM correlation · log analysis · alert triage | +| Identity & Access Management | 35 | IAM policies · PAM · zero trust identity · Okta · SailPoint | +| SOC Operations | 33 | Playbooks · escalation workflows · metrics · tabletop exercises | +| Container Security | 30 | K8s RBAC · image scanning · Falco · container forensics | +| OT/ICS Security | 28 | Modbus · DNP3 · IEC 62443 · historian defense · SCADA | +| API Security | 28 | GraphQL · REST · OWASP API Top 10 · WAF bypass | +| Vulnerability Management | 25 | Nessus · scanning workflows · patch prioritization · CVSS | +| Incident Response | 25 | Breach containment · ransomware response · IR playbooks | +| Red Teaming | 24 | Full-scope engagements · AD attacks · phishing simulation | +| Penetration Testing | 23 | Network · web · cloud · mobile · wireless pentesting | +| Endpoint Security | 17 | EDR · 
LOTL detection · fileless malware · persistence hunting | +| DevSecOps | 17 | CI/CD security · code signing · Terraform auditing | +| Phishing Defense | 16 | Email authentication · BEC detection · phishing IR | +| Cryptography | 14 | TLS · Ed25519 · certificate transparency · key management | +| Zero Trust Architecture | 13 | BeyondCorp · CISA maturity model · microsegmentation | +| Mobile Security | 12 | Android/iOS analysis · mobile pentesting · MDM forensics | +| Ransomware Defense | 7 | Precursor detection · response · recovery · encryption analysis | +| Compliance & Governance | 5 | CIS benchmarks · SOC 2 · regulatory frameworks | +| Deception Technology | 2 | Honeytokens · breach detection canaries | + +## How AI agents use these skills + +Each skill costs **~30 tokens to scan** (frontmatter only) and **500–2,000 tokens to fully load** (complete workflow). This progressive disclosure architecture lets agents search all 754 skills in a single pass without blowing context windows. + +``` +User prompt: "Analyze this memory dump for signs of credential theft" + +Agent's internal process: + + 1. Scans 754 skill frontmatters (~30 tokens each) + → identifies 12 relevant skills by matching tags, description, domain + + 2. Loads top 3 matches: + • performing-memory-forensics-with-volatility3 + • hunting-for-credential-dumping-lsass + • analyzing-windows-event-logs-for-credential-access + + 3. Executes the structured Workflow section step-by-step + → runs Volatility3 plugins, checks LSASS access patterns, + correlates with event log evidence + + 4. Validates results using the Verification section + → confirms IOCs, maps findings to ATT&CK T1003 (Credential Dumping) +``` + +**Without these skills**, the agent guesses at tool commands and misses critical steps. **With them**, it follows the same playbook a senior DFIR analyst would use. 
+ +## Skill anatomy + +Every skill follows a consistent directory structure: + +``` +skills/performing-memory-forensics-with-volatility3/ +├── SKILL.md ← Skill definition (YAML frontmatter + Markdown body) +├── references/ +│ ├── standards.md ← MITRE ATT&CK, ATLAS, D3FEND, NIST mappings +│ └── workflows.md ← Deep technical procedure reference +├── scripts/ +│ └── process.py ← Working helper scripts +└── assets/ + └── template.md ← Filled-in checklists and report templates +``` + + +### YAML frontmatter (real example) + +```yaml +--- +name: performing-memory-forensics-with-volatility3 +description: >- + Analyze memory dumps to extract running processes, network connections, + injected code, and malware artifacts using the Volatility3 framework. +domain: cybersecurity +subdomain: digital-forensics +tags: [forensics, memory-analysis, volatility3, incident-response, dfir] +atlas_techniques: [AML.T0047] +d3fend_techniques: [D3-MA, D3-PSMD] +nist_ai_rmf: [MEASURE-2.6] +nist_csf: [DE.CM-01, RS.AN-03] +version: "1.2" +author: mukul975 +license: Apache-2.0 +--- +``` + + +### Markdown body sections + +```markdown +## When to Use +Trigger conditions — when should an AI agent activate this skill? + +## Prerequisites +Required tools, access levels, and environment setup. + +## Workflow +Step-by-step execution guide with specific commands and decision points. + +## Verification +How to confirm the skill was executed successfully. +``` + +Frontmatter fields: `name` (kebab-case, 1–64 chars), `description` (keyword-rich for agent discovery), `domain`, `subdomain`, `tags`, `atlas_techniques` (MITRE ATLAS IDs), `d3fend_techniques` (MITRE D3FEND IDs), `nist_ai_rmf` (NIST AI RMF references), `nist_csf` (NIST CSF 2.0 categories). MITRE ATT&CK technique mappings are documented in each skill's `references/standards.md` file and in the ATT&CK Navigator layer included with releases. + +
+📊 MITRE ATT&CK Enterprise coverage — all 14 tactics + +  + +| Tactic | ID | Coverage | Key skills | +|---|---|---|---| +| Reconnaissance | TA0043 | Strong | OSINT, subdomain enumeration, DNS recon | +| Resource Development | TA0042 | Moderate | Phishing infrastructure, C2 setup detection | +| Initial Access | TA0001 | Strong | Phishing simulation, exploit detection, forced browsing | +| Execution | TA0002 | Strong | PowerShell analysis, fileless malware, script block logging | +| Persistence | TA0003 | Strong | Scheduled tasks, registry, service accounts, LOTL | +| Privilege Escalation | TA0004 | Strong | Kerberoasting, AD attacks, cloud privilege escalation | +| Defense Evasion | TA0005 | Strong | Obfuscation, rootkit analysis, evasion detection | +| Credential Access | TA0006 | Strong | Mimikatz detection, pass-the-hash, credential dumping | +| Discovery | TA0007 | Moderate | BloodHound, AD enumeration, network scanning | +| Lateral Movement | TA0008 | Strong | SMB exploits, lateral movement detection with Splunk | +| Collection | TA0009 | Moderate | Email forensics, data staging detection | +| Command and Control | TA0011 | Strong | C2 beaconing, DNS tunneling, Cobalt Strike analysis | +| Exfiltration | TA0010 | Strong | DNS exfiltration, DLP controls, data loss detection | +| Impact | TA0040 | Strong | Ransomware defense, encryption analysis, recovery | + +An **ATT&CK Navigator layer file** is included in the [v1.0.0 release assets](https://github.com/mukul975/Anthropic-Cybersecurity-Skills/releases/tag/v1.0.0) for visual coverage mapping. + +> **Note:** ATT&CK v19 lands April 28, 2026 — splitting Defense Evasion (TA0005) into two new tactics: *Stealth* and *Impair Defenses*. Skill mappings will be updated in a forthcoming release. + +
+ +
+📊 NIST CSF 2.0 alignment — all 6 functions + +  + +| Function | Skills | Examples | +|---|---|---| +| **Govern (GV)** | 30+ | Risk strategy, policy frameworks, roles & responsibilities | +| **Identify (ID)** | 120+ | Asset discovery, threat landscape assessment, risk analysis | +| **Protect (PR)** | 150+ | IAM hardening, WAF rules, zero trust, encryption | +| **Detect (DE)** | 200+ | Threat hunting, SIEM correlation, anomaly detection | +| **Respond (RS)** | 160+ | Incident response, forensics, breach containment | +| **Recover (RC)** | 40+ | Ransomware recovery, BCP, disaster recovery | + +NIST CSF 2.0 (February 2024) added the **Govern** function and expanded scope from critical infrastructure to all organizations. Skill mappings align to all 22 categories and reference 106 subcategories. + +
+ +
+📊 Framework deep dive — ATLAS, D3FEND, AI RMF + +  + +### MITRE ATLAS v5.4 — AI/ML adversarial threats +ATLAS maps adversarial tactics, techniques, and case studies specific to AI and machine learning systems. Version 5.4 covers **16 tactics and 84 techniques** including agentic AI attack vectors added in late 2025: AI agent context poisoning, tool invocation abuse, MCP server compromises, and malicious agent deployment. Skills mapped to ATLAS help agents identify and defend against threats to ML pipelines, model weights, inference APIs, and autonomous workflows. + +### MITRE D3FEND v1.3 — Defensive countermeasures +D3FEND is an NSA-funded knowledge graph of **267 defensive techniques** organized across 7 tactical categories: Model, Harden, Detect, Isolate, Deceive, Evict, and Restore. Built on OWL 2 ontology, it uses a shared Digital Artifact layer to bidirectionally map defensive countermeasures to ATT&CK offensive techniques. Skills tagged with D3FEND identifiers let agents recommend specific countermeasures for detected threats. + +### NIST AI RMF 1.0 + GenAI Profile (AI 600-1) +The AI Risk Management Framework defines 4 core functions — Govern, Map, Measure, Manage — with **72 subcategories** for trustworthy AI development. The GenAI Profile (AI 600-1, July 2024) adds **12 risk categories** specific to generative AI, from confabulation and data privacy to prompt injection and supply chain risks. Colorado's AI Act (effective February 2026) provides a **legal safe harbor** for organizations complying with NIST AI RMF, making these mappings directly relevant to regulatory compliance. + +
+ +## Compatible platforms + +**AI code assistants** +Claude Code (Anthropic) · GitHub Copilot (Microsoft) · Cursor · Windsurf · Cline · Aider · Continue · Roo Code · Amazon Q Developer · Tabnine · Sourcegraph Cody · JetBrains AI + +**CLI agents** +OpenAI Codex CLI · Gemini CLI (Google) + +**Autonomous agents** +Devin · Replit Agent · SWE-agent · OpenHands + +**Agent frameworks & SDKs** +LangChain · CrewAI · AutoGen · Semantic Kernel · Haystack · Vercel AI SDK · Any MCP-compatible agent + +All platforms that support the [agentskills.io](https://agentskills.io) standard can load these skills with zero configuration. + +## What people are saying + +> *"A database of real, organized security skills that any AI agent can plug into and use. Not tutorials. Not blog posts."* +> — **[Hasan Toor (@hasantoxr)](https://x.com/hasantoxr/status/2033193922349179249)**, AI/tech creator + +> *"This is not a random collection of security scripts. It's a structured operational knowledge base designed for AI-driven security workflows."* +> — **[fazal-sec](https://fazal-sec.medium.com/claude-skills-ai-powered-cybersecurity-the-complete-guide-to-building-intelligent-security-7bb7e9d14c8e)**, Medium + +## Featured in + +| Where | Type | Link | +|---|---|---| +| **awesome-agent-skills** | Awesome List (1,000+ skills index) | [VoltAgent/awesome-agent-skills](https://github.com/VoltAgent/awesome-agent-skills) | +| **awesome-ai-security** | Awesome List (AI security tools) | [ottosulin/awesome-ai-security](https://github.com/ottosulin/awesome-ai-security) | +| **awesome-codex-cli** | Awesome List (Codex CLI resources) | [RoggeOhta/awesome-codex-cli](https://github.com/RoggeOhta/awesome-codex-cli) | +| **SkillsLLM** | Skills directory & marketplace | [skillsllm.com/skill/anthropic-cybersecurity-skills](https://skillsllm.com/skill/anthropic-cybersecurity-skills) | +| **Openflows** | Signal analysis & tracking | 
[openflows.org](https://openflows.org/currency/currents/anthropic-cybersecurity-skills/) | +| **NeverSight skills_feed** | Automated skills index | [NeverSight/skills_feed](https://github.com/NeverSight/skills_feed) | + +## Star history + + + + + + Star History Chart + + + +## Releases + +| Version | Date | Highlights | +|---|---|---| +| [v1.0.0](https://github.com/mukul975/Anthropic-Cybersecurity-Skills/releases/tag/v1.0.0) | March 11, 2026 | 734 skills · 26 domains · MITRE ATT&CK + NIST CSF 2.0 mapping · ATT&CK Navigator layer | + +Skills have continued to grow on `main` since v1.0.0 — the library now contains **754 skills** with **5-framework mapping** (MITRE ATLAS, D3FEND, and NIST AI RMF added post-release). Check [Releases](https://github.com/mukul975/Anthropic-Cybersecurity-Skills/releases) for the latest tagged version. + +## Contributing + +This project grows through community contributions. Here is how to get involved: + +**Add a new skill** — Domains like Deception Technology (2 skills) and Compliance & Governance (5 skills) need the most help. Follow the template in [CONTRIBUTING.md](CONTRIBUTING.md) and submit a PR with the title `Add skill: your-skill-name`. + +**Improve existing skills** — Add framework mappings, fix workflows, update tool references, or contribute scripts and templates. + +**Report issues** — Found an inaccurate procedure or broken script? [Open an issue](https://github.com/mukul975/Anthropic-Cybersecurity-Skills/issues). + +Every PR is reviewed for technical accuracy and agentskills.io standard compliance within 48 hours. Check [good first issues](https://github.com/mukul975/Anthropic-Cybersecurity-Skills/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22) for a starting point. + +This project follows the [Contributor Covenant](https://www.contributor-covenant.org/). By participating, you agree to uphold this code. 
+ +## Community + +💬 [Discussions](https://github.com/mukul975/Anthropic-Cybersecurity-Skills/discussions) — Questions, ideas, and roadmap conversations +🐛 [Issues](https://github.com/mukul975/Anthropic-Cybersecurity-Skills/issues) — Bug reports and feature requests +🔒 [Security Policy](SECURITY.md) — Responsible disclosure process (48-hour acknowledgment) + +## Citation + +If you use this project in research or publications: + +```bibtex +@software{anthropic_cybersecurity_skills, + author = {Jangra, Mahipal}, + title = {Anthropic Cybersecurity Skills}, + year = {2026}, + url = {https://github.com/mukul975/Anthropic-Cybersecurity-Skills}, + license = {Apache-2.0}, + note = {754 structured cybersecurity skills for AI agents, + mapped to MITRE ATT\&CK, NIST CSF 2.0, MITRE ATLAS, + MITRE D3FEND, and NIST AI RMF} +} +``` + +## License + +This project is licensed under the [Apache License 2.0](LICENSE). You are free to use, modify, and distribute these skills in both personal and commercial projects. + +--- + +
+ +**If this project helps your security work, consider giving it a ⭐** + +[⭐ Star](https://github.com/mukul975/Anthropic-Cybersecurity-Skills/stargazers) · [🍴 Fork](https://github.com/mukul975/Anthropic-Cybersecurity-Skills/fork) · [💬 Discuss](https://github.com/mukul975/Anthropic-Cybersecurity-Skills/discussions) · [📝 Contribute](CONTRIBUTING.md) + +Community project by [@mukul975](https://github.com/mukul975). Not affiliated with Anthropic PBC. + +
diff --git a/personas/_shared/anthropic-cybersecurity-skills/SECURITY.md b/personas/_shared/anthropic-cybersecurity-skills/SECURITY.md new file mode 100644 index 0000000..e845925 --- /dev/null +++ b/personas/_shared/anthropic-cybersecurity-skills/SECURITY.md @@ -0,0 +1,47 @@ +# Security Policy + +## Supported Versions + +All skill content in this repository is covered by this security policy. + +| Component | Supported | +|-----------|-----------| +| Skill definitions (SKILL.md files) | Yes | +| Scripts and automation | Yes | +| Documentation | Yes | + +## Reporting a Vulnerability + +If you discover a security issue with any skill's scripts, instructions, or content, please report it responsibly: + +1. **Do not** open a public issue +2. Use GitHub's private security advisory: [Report a vulnerability](https://github.com/mukul975/Anthropic-Cybersecurity-Skills/security/advisories/new) +3. Include in your report: + - Affected skill name and file path + - Nature of the vulnerability + - Potential impact + - Steps to reproduce (if applicable) + - Suggested fix (if you have one) + +## Response Timeline + +- **Initial acknowledgment:** Within 48 hours +- **Assessment and triage:** Within 1 week +- **Fix or mitigation:** Based on severity, typically within 2 weeks + +## Scope + +The following are in scope for security reports: + +- Skills that contain commands or scripts that could cause unintended harm +- Instructions that could lead to unauthorized access if followed incorrectly +- Sensitive data accidentally included in skill content +- Dependencies or external references that have become compromised + +## Recognition + +We credit responsible disclosures in our changelog. If you report a valid security issue, we will acknowledge your contribution unless you prefer to remain anonymous. + +## Contact + +For security matters that cannot be reported through GitHub's advisory system, reach out via the repository's discussion forum. 
diff --git a/personas/_shared/anthropic-cybersecurity-skills/assets/.gitkeep b/personas/_shared/anthropic-cybersecurity-skills/assets/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/personas/_shared/anthropic-cybersecurity-skills/assets/banner.png b/personas/_shared/anthropic-cybersecurity-skills/assets/banner.png new file mode 100644 index 0000000..8697e71 Binary files /dev/null and b/personas/_shared/anthropic-cybersecurity-skills/assets/banner.png differ diff --git a/personas/_shared/anthropic-cybersecurity-skills/index.json b/personas/_shared/anthropic-cybersecurity-skills/index.json new file mode 100644 index 0000000..5175d16 --- /dev/null +++ b/personas/_shared/anthropic-cybersecurity-skills/index.json @@ -0,0 +1 @@ +{"version":"1.1.0","generated_at":"2026-04-06T09:17:51Z","repository":"https://github.com/mukul975/Anthropic-Cybersecurity-Skills","domain":"cybersecurity","total_skills":754,"skills":[{"name":"acquiring-disk-image-with-dd-and-dcfldd","description":"Create forensically sound bit-for-bit disk images using dd and dcfldd while preserving evidence integrity through","domain":"cybersecurity","path":"skills/acquiring-disk-image-with-dd-and-dcfldd"},{"name":"analyzing-active-directory-acl-abuse","description":"Detect dangerous ACL misconfigurations in Active Directory using ldap3 to identify GenericAll, WriteDACL, and","domain":"cybersecurity","path":"skills/analyzing-active-directory-acl-abuse"},{"name":"analyzing-android-malware-with-apktool","description":"Perform static analysis of Android APK malware samples using apktool for decompilation, jadx for Java source","domain":"cybersecurity","path":"skills/analyzing-android-malware-with-apktool"},{"name":"analyzing-api-gateway-access-logs","description":"'Parses API Gateway access logs (AWS API Gateway, Kong, Nginx) to detect BOLA/IDOR attacks, rate limit 
bypass,","domain":"cybersecurity","path":"skills/analyzing-api-gateway-access-logs"},{"name":"analyzing-apt-group-with-mitre-navigator","description":"Analyze advanced persistent threat (APT) group techniques using MITRE ATT&CK Navigator to create layered heatmaps","domain":"cybersecurity","path":"skills/analyzing-apt-group-with-mitre-navigator"},{"name":"analyzing-azure-activity-logs-for-threats","description":"'Queries Azure Monitor activity logs and sign-in logs via azure-monitor-query to detect suspicious administrative","domain":"cybersecurity","path":"skills/analyzing-azure-activity-logs-for-threats"},{"name":"analyzing-bootkit-and-rootkit-samples","description":"'Analyzes bootkit and advanced rootkit malware that infects the Master Boot Record (MBR), Volume Boot Record","domain":"cybersecurity","path":"skills/analyzing-bootkit-and-rootkit-samples"},{"name":"analyzing-browser-forensics-with-hindsight","description":"Analyze Chromium-based browser artifacts using Hindsight to extract browsing history, downloads, cookies, cached","domain":"cybersecurity","path":"skills/analyzing-browser-forensics-with-hindsight"},{"name":"analyzing-campaign-attribution-evidence","description":"Campaign attribution analysis involves systematically evaluating evidence to determine which threat actor or","domain":"cybersecurity","path":"skills/analyzing-campaign-attribution-evidence"},{"name":"analyzing-certificate-transparency-for-phishing","description":"Monitor Certificate Transparency logs using crt.sh and Certstream to detect phishing domains, lookalike certificates,","domain":"cybersecurity","path":"skills/analyzing-certificate-transparency-for-phishing"},{"name":"analyzing-cloud-storage-access-patterns","description":"Detect abnormal access patterns in AWS S3, GCS, and Azure Blob Storage by analyzing CloudTrail Data Events, 
GCS","domain":"cybersecurity","path":"skills/analyzing-cloud-storage-access-patterns"},{"name":"analyzing-cobalt-strike-beacon-configuration","description":"Extract and analyze Cobalt Strike beacon configuration from PE files and memory dumps to identify C2 infrastructure,","domain":"cybersecurity","path":"skills/analyzing-cobalt-strike-beacon-configuration"},{"name":"analyzing-cobaltstrike-malleable-c2-profiles","description":"Parse and analyze Cobalt Strike Malleable C2 profiles using dissect.cobaltstrike and pyMalleableC2 to extract","domain":"cybersecurity","path":"skills/analyzing-cobaltstrike-malleable-c2-profiles"},{"name":"analyzing-command-and-control-communication","description":"'Analyzes malware command-and-control (C2) communication protocols to understand beacon patterns, command structures,","domain":"cybersecurity","path":"skills/analyzing-command-and-control-communication"},{"name":"analyzing-cyber-kill-chain","description":"'Analyzes intrusion activity against the Lockheed Martin Cyber Kill Chain framework to identify which phases","domain":"cybersecurity","path":"skills/analyzing-cyber-kill-chain"},{"name":"analyzing-disk-image-with-autopsy","description":"Perform comprehensive forensic analysis of disk images using Autopsy to recover files, examine artifacts, and","domain":"cybersecurity","path":"skills/analyzing-disk-image-with-autopsy"},{"name":"analyzing-dns-logs-for-exfiltration","description":"'Analyzes DNS query logs to detect data exfiltration via DNS tunneling, DGA domain communication, and covert","domain":"cybersecurity","path":"skills/analyzing-dns-logs-for-exfiltration"},{"name":"analyzing-docker-container-forensics","description":"Investigate compromised Docker containers by analyzing images, layers, volumes, logs, and runtime artifacts to","domain":"cybersecurity","path":"skills/analyzing-docker-container-forensics"},{"name":"analyzing-email-headers-for-phishing-investigation","description":"Parse and analyze email headers to trace 
the origin of phishing emails, verify sender authenticity, and identify","domain":"cybersecurity","path":"skills/analyzing-email-headers-for-phishing-investigation"},{"name":"analyzing-ethereum-smart-contract-vulnerabilities","description":"Perform static and symbolic analysis of Solidity smart contracts using Slither and Mythril to detect reentrancy,","domain":"cybersecurity","path":"skills/analyzing-ethereum-smart-contract-vulnerabilities"},{"name":"analyzing-golang-malware-with-ghidra","description":"Reverse engineer Go-compiled malware using Ghidra with specialized scripts for function recovery, string extraction,","domain":"cybersecurity","path":"skills/analyzing-golang-malware-with-ghidra"},{"name":"analyzing-heap-spray-exploitation","description":"Detect and analyze heap spray attacks in memory dumps using Volatility3 plugins to identify NOP sled patterns,","domain":"cybersecurity","path":"skills/analyzing-heap-spray-exploitation"},{"name":"analyzing-indicators-of-compromise","description":"'Analyzes indicators of compromise (IOCs) including IP addresses, domains, file hashes, URLs, and email artifacts","domain":"cybersecurity","path":"skills/analyzing-indicators-of-compromise"},{"name":"analyzing-ios-app-security-with-objection","description":"'Performs runtime mobile security exploration of iOS applications using Objection, a Frida-powered toolkit that","domain":"cybersecurity","path":"skills/analyzing-ios-app-security-with-objection"},{"name":"analyzing-kubernetes-audit-logs","description":"'Parses Kubernetes API server audit logs (JSON lines) to detect exec-into-pod, secret access, RBAC modifications,","domain":"cybersecurity","path":"skills/analyzing-kubernetes-audit-logs"},{"name":"analyzing-linux-audit-logs-for-intrusion","description":"'Uses the Linux Audit framework (auditd) with ausearch and aureport utilities to detect intrusion attempts, 
unauthorized","domain":"cybersecurity","path":"skills/analyzing-linux-audit-logs-for-intrusion"},{"name":"analyzing-linux-elf-malware","description":"'Analyzes malicious Linux ELF (Executable and Linkable Format) binaries including botnets, cryptominers, ransomware,","domain":"cybersecurity","path":"skills/analyzing-linux-elf-malware"},{"name":"analyzing-linux-kernel-rootkits","description":"Detect kernel-level rootkits in Linux memory dumps using Volatility3 linux plugins (check_syscall, lsmod, hidden_modules),","domain":"cybersecurity","path":"skills/analyzing-linux-kernel-rootkits"},{"name":"analyzing-linux-system-artifacts","description":"Examine Linux system artifacts including auth logs, cron jobs, shell history, and system configuration to uncover","domain":"cybersecurity","path":"skills/analyzing-linux-system-artifacts"},{"name":"analyzing-lnk-file-and-jump-list-artifacts","description":"Analyze Windows LNK shortcut files and Jump List artifacts to establish evidence of file access, program execution,","domain":"cybersecurity","path":"skills/analyzing-lnk-file-and-jump-list-artifacts"},{"name":"analyzing-macro-malware-in-office-documents","description":"'Analyzes malicious VBA macros embedded in Microsoft Office documents (Word, Excel, PowerPoint) to identify download","domain":"cybersecurity","path":"skills/analyzing-macro-malware-in-office-documents"},{"name":"analyzing-malicious-pdf-with-peepdf","description":"Perform static analysis of malicious PDF documents using peepdf, pdfid, and pdf-parser to extract embedded JavaScript,","domain":"cybersecurity","path":"skills/analyzing-malicious-pdf-with-peepdf"},{"name":"analyzing-malicious-url-with-urlscan","description":"URLScan.io is a free service for scanning and analyzing suspicious URLs. 
It captures screenshots, DOM content,","domain":"cybersecurity","path":"skills/analyzing-malicious-url-with-urlscan"},{"name":"analyzing-malware-behavior-with-cuckoo-sandbox","description":"'Executes malware samples in Cuckoo Sandbox to observe runtime behavior including process creation, file system","domain":"cybersecurity","path":"skills/analyzing-malware-behavior-with-cuckoo-sandbox"},{"name":"analyzing-malware-family-relationships-with-malpedia","description":"Use the Malpedia platform and API to research malware family relationships, track variant evolution, link families","domain":"cybersecurity","path":"skills/analyzing-malware-family-relationships-with-malpedia"},{"name":"analyzing-malware-persistence-with-autoruns","description":"Use Sysinternals Autoruns to systematically identify and analyze malware persistence mechanisms across registry","domain":"cybersecurity","path":"skills/analyzing-malware-persistence-with-autoruns"},{"name":"analyzing-malware-sandbox-evasion-techniques","description":"Detect sandbox evasion techniques in malware samples by analyzing timing checks, VM artifact queries, user interaction","domain":"cybersecurity","path":"skills/analyzing-malware-sandbox-evasion-techniques"},{"name":"analyzing-memory-dumps-with-volatility","description":"'Analyzes RAM memory dumps from compromised systems using the Volatility framework to identify malicious processes,","domain":"cybersecurity","path":"skills/analyzing-memory-dumps-with-volatility"},{"name":"analyzing-memory-forensics-with-lime-and-volatility","description":"'Performs Linux memory acquisition using LiME (Linux Memory Extractor) kernel module and analysis with Volatility","domain":"cybersecurity","path":"skills/analyzing-memory-forensics-with-lime-and-volatility"},{"name":"analyzing-mft-for-deleted-file-recovery","description":"Analyze the NTFS Master File Table ($MFT) to recover metadata and content of deleted files by examining MFT 
record","domain":"cybersecurity","path":"skills/analyzing-mft-for-deleted-file-recovery"},{"name":"analyzing-network-covert-channels-in-malware","description":"Detect and analyze covert communication channels used by malware including DNS tunneling, ICMP exfiltration,","domain":"cybersecurity","path":"skills/analyzing-network-covert-channels-in-malware"},{"name":"analyzing-network-flow-data-with-netflow","description":"Parse NetFlow v9 and IPFIX records to detect volumetric anomalies, port scanning, data exfiltration, and C2 beaconing","domain":"cybersecurity","path":"skills/analyzing-network-flow-data-with-netflow"},{"name":"analyzing-network-packets-with-scapy","description":"Craft, send, sniff, and dissect network packets using Scapy for protocol analysis, network reconnaissance, and","domain":"cybersecurity","path":"skills/analyzing-network-packets-with-scapy"},{"name":"analyzing-network-traffic-for-incidents","description":"'Analyzes network traffic captures and flow data to identify adversary activity during security incidents, including","domain":"cybersecurity","path":"skills/analyzing-network-traffic-for-incidents"},{"name":"analyzing-network-traffic-of-malware","description":"'Analyzes network traffic generated by malware during sandbox execution or live incident response to identify","domain":"cybersecurity","path":"skills/analyzing-network-traffic-of-malware"},{"name":"analyzing-network-traffic-with-wireshark","description":"'Captures and analyzes network packet data using Wireshark and tshark to identify malicious traffic patterns,","domain":"cybersecurity","path":"skills/analyzing-network-traffic-with-wireshark"},{"name":"analyzing-office365-audit-logs-for-compromise","description":"Parse Office 365 Unified Audit Logs via Microsoft Graph API to detect email forwarding rule creation, inbox 
delegation,","domain":"cybersecurity","path":"skills/analyzing-office365-audit-logs-for-compromise"},{"name":"analyzing-outlook-pst-for-email-forensics","description":"Analyze Microsoft Outlook PST and OST files for email forensic evidence including message content, headers, attachments,","domain":"cybersecurity","path":"skills/analyzing-outlook-pst-for-email-forensics"},{"name":"analyzing-packed-malware-with-upx-unpacker","description":"'Identifies and unpacks UPX-packed and other packed malware samples to expose the original executable code for","domain":"cybersecurity","path":"skills/analyzing-packed-malware-with-upx-unpacker"},{"name":"analyzing-pdf-malware-with-pdfid","description":"'Analyzes malicious PDF files using PDFiD, pdf-parser, and peepdf to identify embedded JavaScript, shellcode,","domain":"cybersecurity","path":"skills/analyzing-pdf-malware-with-pdfid"},{"name":"analyzing-persistence-mechanisms-in-linux","description":"Detect and analyze Linux persistence mechanisms including crontab entries, systemd service units, LD_PRELOAD","domain":"cybersecurity","path":"skills/analyzing-persistence-mechanisms-in-linux"},{"name":"analyzing-powershell-empire-artifacts","description":"Detect PowerShell Empire framework artifacts in Windows event logs by identifying Base64 encoded launcher patterns,","domain":"cybersecurity","path":"skills/analyzing-powershell-empire-artifacts"},{"name":"analyzing-powershell-script-block-logging","description":"Parse Windows PowerShell Script Block Logs (Event ID 4104) from EVTX files to detect obfuscated commands, encoded","domain":"cybersecurity","path":"skills/analyzing-powershell-script-block-logging"},{"name":"analyzing-prefetch-files-for-execution-history","description":"Parse Windows Prefetch files to determine program execution history including run counts, timestamps, and 
referenced","domain":"cybersecurity","path":"skills/analyzing-prefetch-files-for-execution-history"},{"name":"analyzing-ransomware-encryption-mechanisms","description":"'Analyzes encryption algorithms, key management, and file encryption routines used by ransomware families to","domain":"cybersecurity","path":"skills/analyzing-ransomware-encryption-mechanisms"},{"name":"analyzing-ransomware-leak-site-intelligence","description":"Monitor and analyze ransomware group data leak sites (DLS) to track victim postings, extract threat intelligence","domain":"cybersecurity","path":"skills/analyzing-ransomware-leak-site-intelligence"},{"name":"analyzing-ransomware-network-indicators","description":"Identify ransomware network indicators including C2 beaconing patterns, TOR exit node connections, data exfiltration","domain":"cybersecurity","path":"skills/analyzing-ransomware-network-indicators"},{"name":"analyzing-ransomware-payment-wallets","description":"'Traces ransomware cryptocurrency payment flows using blockchain analysis tools such as Chainalysis Reactor,","domain":"cybersecurity","path":"skills/analyzing-ransomware-payment-wallets"},{"name":"analyzing-sbom-for-supply-chain-vulnerabilities","description":"'Parses Software Bill of Materials (SBOM) in CycloneDX and SPDX JSON formats to identify supply chain vulnerabilities","domain":"cybersecurity","path":"skills/analyzing-sbom-for-supply-chain-vulnerabilities"},{"name":"analyzing-security-logs-with-splunk","description":"'Leverages Splunk Enterprise Security and SPL (Search Processing Language) to investigate security incidents","domain":"cybersecurity","path":"skills/analyzing-security-logs-with-splunk"},{"name":"analyzing-slack-space-and-file-system-artifacts","description":"Examine file system slack space, MFT entries, USN journal, and alternate data streams to recover hidden 
data","domain":"cybersecurity","path":"skills/analyzing-slack-space-and-file-system-artifacts"},{"name":"analyzing-supply-chain-malware-artifacts","description":"Investigate supply chain attack artifacts including trojanized software updates, compromised build pipelines,","domain":"cybersecurity","path":"skills/analyzing-supply-chain-malware-artifacts"},{"name":"analyzing-threat-actor-ttps-with-mitre-attack","description":"MITRE ATT&CK is a globally-accessible knowledge base of adversary tactics, techniques, and procedures (TTPs)","domain":"cybersecurity","path":"skills/analyzing-threat-actor-ttps-with-mitre-attack"},{"name":"analyzing-threat-actor-ttps-with-mitre-navigator","description":"'Map advanced persistent threat (APT) group tactics, techniques, and procedures (TTPs) to the MITRE ATT&CK framework","domain":"cybersecurity","path":"skills/analyzing-threat-actor-ttps-with-mitre-navigator"},{"name":"analyzing-threat-intelligence-feeds","description":"'Analyzes structured and unstructured threat intelligence feeds to extract actionable indicators, adversary tactics,","domain":"cybersecurity","path":"skills/analyzing-threat-intelligence-feeds"},{"name":"analyzing-threat-landscape-with-misp","description":"Analyze the threat landscape using MISP (Malware Information Sharing Platform) by querying event statistics,","domain":"cybersecurity","path":"skills/analyzing-threat-landscape-with-misp"},{"name":"analyzing-tls-certificate-transparency-logs","description":"'Queries Certificate Transparency logs via crt.sh and pycrtsh to detect phishing domains, unauthorized certificate","domain":"cybersecurity","path":"skills/analyzing-tls-certificate-transparency-logs"},{"name":"analyzing-typosquatting-domains-with-dnstwist","description":"Detect typosquatting, homograph phishing, and brand impersonation domains using dnstwist to generate domain 
permutations","domain":"cybersecurity","path":"skills/analyzing-typosquatting-domains-with-dnstwist"},{"name":"analyzing-uefi-bootkit-persistence","description":"'Analyzes UEFI bootkit persistence mechanisms including firmware implants in SPI flash, EFI System Partition","domain":"cybersecurity","path":"skills/analyzing-uefi-bootkit-persistence"},{"name":"analyzing-usb-device-connection-history","description":"Investigate USB device connection history from Windows registry, event logs, and setupapi logs to track removable","domain":"cybersecurity","path":"skills/analyzing-usb-device-connection-history"},{"name":"analyzing-web-server-logs-for-intrusion","description":"Parse Apache and Nginx access logs to detect SQL injection attempts, local file inclusion, directory traversal,","domain":"cybersecurity","path":"skills/analyzing-web-server-logs-for-intrusion"},{"name":"analyzing-windows-amcache-artifacts","description":"'Parses and analyzes the Windows Amcache.hve registry hive to extract evidence of program execution, application","domain":"cybersecurity","path":"skills/analyzing-windows-amcache-artifacts"},{"name":"analyzing-windows-event-logs-in-splunk","description":"'Analyzes Windows Security, System, and Sysmon event logs in Splunk to detect authentication attacks, privilege","domain":"cybersecurity","path":"skills/analyzing-windows-event-logs-in-splunk"},{"name":"analyzing-windows-lnk-files-for-artifacts","description":"Parse Windows LNK shortcut files to extract target paths, timestamps, volume information, and machine identifiers","domain":"cybersecurity","path":"skills/analyzing-windows-lnk-files-for-artifacts"},{"name":"analyzing-windows-prefetch-with-python","description":"Parse Windows Prefetch files using the windowsprefetch Python library to reconstruct application execution history,","domain":"cybersecurity","path":"skills/analyzing-windows-prefetch-with-python"},{"name":"analyzing-windows-registry-for-artifacts","description":"Extract and analyze 
Windows Registry hives to uncover user activity, installed software, autostart entries, and","domain":"cybersecurity","path":"skills/analyzing-windows-registry-for-artifacts"},{"name":"analyzing-windows-shellbag-artifacts","description":"Analyze Windows Shellbag registry artifacts to reconstruct folder browsing activity, detect access to removable","domain":"cybersecurity","path":"skills/analyzing-windows-shellbag-artifacts"},{"name":"auditing-aws-s3-bucket-permissions","description":"'Systematically audit AWS S3 bucket permissions to identify publicly accessible buckets, overly permissive ACLs,","domain":"cybersecurity","path":"skills/auditing-aws-s3-bucket-permissions"},{"name":"auditing-azure-active-directory-configuration","description":"'Auditing Microsoft Entra ID (Azure Active Directory) configuration to identify risky authentication policies,","domain":"cybersecurity","path":"skills/auditing-azure-active-directory-configuration"},{"name":"auditing-cloud-with-cis-benchmarks","description":"'This skill details how to conduct cloud security audits using Center for Internet Security benchmarks for AWS,","domain":"cybersecurity","path":"skills/auditing-cloud-with-cis-benchmarks"},{"name":"auditing-gcp-iam-permissions","description":"'Auditing Google Cloud Platform IAM permissions to identify overly permissive bindings, primitive role usage,","domain":"cybersecurity","path":"skills/auditing-gcp-iam-permissions"},{"name":"auditing-kubernetes-cluster-rbac","description":"'Auditing Kubernetes cluster RBAC configurations to identify overly permissive roles, wildcard permissions, dangerous","domain":"cybersecurity","path":"skills/auditing-kubernetes-cluster-rbac"},{"name":"auditing-terraform-infrastructure-for-security","description":"'Auditing Terraform infrastructure-as-code for security misconfigurations using Checkov, tfsec, Terrascan, 
and","domain":"cybersecurity","path":"skills/auditing-terraform-infrastructure-for-security"},{"name":"auditing-tls-certificate-transparency-logs","description":"'Monitors Certificate Transparency (CT) logs to detect unauthorized certificate issuance, discover subdomains","domain":"cybersecurity","path":"skills/auditing-tls-certificate-transparency-logs"},{"name":"automating-ioc-enrichment","description":"'Automates the enrichment of raw indicators of compromise with multi-source threat intelligence context using","domain":"cybersecurity","path":"skills/automating-ioc-enrichment"},{"name":"building-adversary-infrastructure-tracking-system","description":"Build an automated system to track adversary infrastructure using passive DNS, certificate transparency, WHOIS","domain":"cybersecurity","path":"skills/building-adversary-infrastructure-tracking-system"},{"name":"building-attack-pattern-library-from-cti-reports","description":"Extract and catalog attack patterns from cyber threat intelligence reports into a structured STIX-based library","domain":"cybersecurity","path":"skills/building-attack-pattern-library-from-cti-reports"},{"name":"building-automated-malware-submission-pipeline","description":"'Builds an automated malware submission and analysis pipeline that collects suspicious files from endpoints and","domain":"cybersecurity","path":"skills/building-automated-malware-submission-pipeline"},{"name":"building-c2-infrastructure-with-sliver-framework","description":"Build and configure a resilient command-and-control infrastructure using BishopFox's Sliver C2 framework with","domain":"cybersecurity","path":"skills/building-c2-infrastructure-with-sliver-framework"},{"name":"building-cloud-siem-with-sentinel","description":"'This skill covers deploying Microsoft Sentinel as a cloud-native SIEM and SOAR platform for centralized 
security","domain":"cybersecurity","path":"skills/building-cloud-siem-with-sentinel"},{"name":"building-detection-rule-with-splunk-spl","description":"Build effective detection rules using Splunk Search Processing Language (SPL) correlation searches to identify","domain":"cybersecurity","path":"skills/building-detection-rule-with-splunk-spl"},{"name":"building-detection-rules-with-sigma","description":"'Builds vendor-agnostic detection rules using the Sigma rule format for threat detection across SIEM platforms","domain":"cybersecurity","path":"skills/building-detection-rules-with-sigma"},{"name":"building-devsecops-pipeline-with-gitlab-ci","description":"Design and implement a comprehensive DevSecOps pipeline in GitLab CI/CD integrating SAST, DAST, container scanning,","domain":"cybersecurity","path":"skills/building-devsecops-pipeline-with-gitlab-ci"},{"name":"building-identity-federation-with-saml-azure-ad","description":"Establish SAML 2.0 identity federation between on-premises Active Directory and Azure AD (Microsoft Entra ID)","domain":"cybersecurity","path":"skills/building-identity-federation-with-saml-azure-ad"},{"name":"building-identity-governance-lifecycle-process","description":"'Builds comprehensive identity governance and lifecycle management processes including joiner-mover-leaver automation,","domain":"cybersecurity","path":"skills/building-identity-governance-lifecycle-process"},{"name":"building-incident-response-dashboard","description":"'Builds real-time incident response dashboards in Splunk, Elastic, or Grafana to provide SOC analysts and leadership","domain":"cybersecurity","path":"skills/building-incident-response-dashboard"},{"name":"building-incident-response-playbook","description":"'Designs and documents structured incident response playbooks that define step-by-step procedures for 
specific","domain":"cybersecurity","path":"skills/building-incident-response-playbook"},{"name":"building-incident-timeline-with-timesketch","description":"Build collaborative forensic incident timelines using Timesketch to ingest, normalize, and analyze multi-source","domain":"cybersecurity","path":"skills/building-incident-timeline-with-timesketch"},{"name":"building-ioc-defanging-and-sharing-pipeline","description":"Build an automated pipeline to defang indicators of compromise (URLs, IPs, domains, emails) for safe sharing","domain":"cybersecurity","path":"skills/building-ioc-defanging-and-sharing-pipeline"},{"name":"building-ioc-enrichment-pipeline-with-opencti","description":"OpenCTI is an open-source platform for managing cyber threat intelligence knowledge, built on STIX 2.1 as its","domain":"cybersecurity","path":"skills/building-ioc-enrichment-pipeline-with-opencti"},{"name":"building-malware-incident-communication-template","description":"Build structured communication templates for malware incidents including stakeholder notifications, executive","domain":"cybersecurity","path":"skills/building-malware-incident-communication-template"},{"name":"building-patch-tuesday-response-process","description":"Establish a structured operational process to triage, test, and deploy Microsoft Patch Tuesday security updates","domain":"cybersecurity","path":"skills/building-patch-tuesday-response-process"},{"name":"building-phishing-reporting-button-workflow","description":"Implement a phishing report button in email clients with automated triage workflow that analyzes user-reported","domain":"cybersecurity","path":"skills/building-phishing-reporting-button-workflow"},{"name":"building-ransomware-playbook-with-cisa-framework","description":"'Builds a structured ransomware incident response playbook aligned with the CISA StopRansomware Guide and 
NIST","domain":"cybersecurity","path":"skills/building-ransomware-playbook-with-cisa-framework"},{"name":"building-red-team-c2-infrastructure-with-havoc","description":"Deploy and configure the Havoc C2 framework with teamserver, HTTPS listeners, redirectors, and Demon agents for","domain":"cybersecurity","path":"skills/building-red-team-c2-infrastructure-with-havoc"},{"name":"building-role-mining-for-rbac-optimization","description":"Apply bottom-up and top-down role mining techniques to discover optimal RBAC roles from existing user-permission","domain":"cybersecurity","path":"skills/building-role-mining-for-rbac-optimization"},{"name":"building-soc-escalation-matrix","description":"Build a structured SOC escalation matrix defining severity tiers, response SLAs, escalation paths, and notification","domain":"cybersecurity","path":"skills/building-soc-escalation-matrix"},{"name":"building-soc-metrics-and-kpi-tracking","description":"'Builds SOC performance metrics and KPI tracking dashboards measuring Mean Time to Detect (MTTD), Mean Time to","domain":"cybersecurity","path":"skills/building-soc-metrics-and-kpi-tracking"},{"name":"building-soc-playbook-for-ransomware","description":"'Builds a structured SOC incident response playbook for ransomware attacks covering detection, containment, eradication,","domain":"cybersecurity","path":"skills/building-soc-playbook-for-ransomware"},{"name":"building-threat-actor-profile-from-osint","description":"Build comprehensive threat actor profiles using open-source intelligence (OSINT) techniques to document adversary","domain":"cybersecurity","path":"skills/building-threat-actor-profile-from-osint"},{"name":"building-threat-feed-aggregation-with-misp","description":"Deploy MISP (Malware Information Sharing Platform) to aggregate, correlate, and distribute threat 
intelligence","domain":"cybersecurity","path":"skills/building-threat-feed-aggregation-with-misp"},{"name":"building-threat-hunt-hypothesis-framework","description":"Build a systematic threat hunt hypothesis framework that transforms threat intelligence, attack patterns, and","domain":"cybersecurity","path":"skills/building-threat-hunt-hypothesis-framework"},{"name":"building-threat-intelligence-enrichment-in-splunk","description":"Build automated threat intelligence enrichment pipelines in Splunk Enterprise Security using lookup tables, modular","domain":"cybersecurity","path":"skills/building-threat-intelligence-enrichment-in-splunk"},{"name":"building-threat-intelligence-feed-integration","description":"'Builds automated threat intelligence feed integration pipelines connecting STIX/TAXII feeds, open-source threat","domain":"cybersecurity","path":"skills/building-threat-intelligence-feed-integration"},{"name":"building-threat-intelligence-platform","description":"Building a Threat Intelligence Platform (TIP) involves deploying and integrating multiple CTI tools into a unified","domain":"cybersecurity","path":"skills/building-threat-intelligence-platform"},{"name":"building-vulnerability-aging-and-sla-tracking","description":"Implement a vulnerability aging dashboard and SLA tracking system to measure remediation performance against","domain":"cybersecurity","path":"skills/building-vulnerability-aging-and-sla-tracking"},{"name":"building-vulnerability-dashboard-with-defectdojo","description":"Deploy DefectDojo as a centralized vulnerability management dashboard with scanner integrations, deduplication,","domain":"cybersecurity","path":"skills/building-vulnerability-dashboard-with-defectdojo"},{"name":"building-vulnerability-exception-tracking-system","description":"Build a vulnerability exception and risk acceptance tracking system with approval workflows, compensating 
controls","domain":"cybersecurity","path":"skills/building-vulnerability-exception-tracking-system"},{"name":"building-vulnerability-scanning-workflow","description":"'Builds a structured vulnerability scanning workflow using tools like Nessus, Qualys, and OpenVAS to discover,","domain":"cybersecurity","path":"skills/building-vulnerability-scanning-workflow"},{"name":"bypassing-authentication-with-forced-browsing","description":"Discovering and accessing unprotected pages, APIs, and administrative interfaces by enumerating URLs and bypassing","domain":"cybersecurity","path":"skills/bypassing-authentication-with-forced-browsing"},{"name":"collecting-indicators-of-compromise","description":"'Systematically collects, categorizes, and distributes indicators of compromise (IOCs) during and after security","domain":"cybersecurity","path":"skills/collecting-indicators-of-compromise"},{"name":"collecting-open-source-intelligence","description":"'Collects and synthesizes open-source intelligence (OSINT) about threat actors, malicious infrastructure, and","domain":"cybersecurity","path":"skills/collecting-open-source-intelligence"},{"name":"collecting-threat-intelligence-with-misp","description":"MISP (Malware Information Sharing Platform) is an open-source threat intelligence platform for gathering, sharing,","domain":"cybersecurity","path":"skills/collecting-threat-intelligence-with-misp"},{"name":"collecting-volatile-evidence-from-compromised-host","description":"Collect volatile forensic evidence from a compromised system following order of volatility, preserving memory,","domain":"cybersecurity","path":"skills/collecting-volatile-evidence-from-compromised-host"},{"name":"conducting-api-security-testing","description":"'Conducts security testing of REST, GraphQL, and gRPC APIs to identify vulnerabilities in authentication, 
authorization,","domain":"cybersecurity","path":"skills/conducting-api-security-testing"},{"name":"conducting-cloud-incident-response","description":"'Responds to security incidents in cloud environments (AWS, Azure, GCP) by performing identity-based containment,","domain":"cybersecurity","path":"skills/conducting-cloud-incident-response"},{"name":"conducting-cloud-penetration-testing","description":"'This skill outlines methodologies for performing authorized penetration testing against AWS, Azure, and GCP","domain":"cybersecurity","path":"skills/conducting-cloud-penetration-testing"},{"name":"conducting-domain-persistence-with-dcsync","description":"Perform DCSync attacks to replicate Active Directory credentials and establish domain persistence by extracting","domain":"cybersecurity","path":"skills/conducting-domain-persistence-with-dcsync"},{"name":"conducting-external-reconnaissance-with-osint","description":"'Conducts external reconnaissance using Open Source Intelligence (OSINT) techniques to map an organization''s","domain":"cybersecurity","path":"skills/conducting-external-reconnaissance-with-osint"},{"name":"conducting-full-scope-red-team-engagement","description":"Plan and execute a comprehensive red team engagement covering reconnaissance through post-exploitation using","domain":"cybersecurity","path":"skills/conducting-full-scope-red-team-engagement"},{"name":"conducting-internal-network-penetration-test","description":"Execute an internal network penetration test simulating an insider threat or post-breach attacker to identify","domain":"cybersecurity","path":"skills/conducting-internal-network-penetration-test"},{"name":"conducting-internal-reconnaissance-with-bloodhound-ce","description":"Conduct internal Active Directory reconnaissance using BloodHound Community Edition to map attack paths, 
identify","domain":"cybersecurity","path":"skills/conducting-internal-reconnaissance-with-bloodhound-ce"},{"name":"conducting-malware-incident-response","description":"'Responds to malware infections across enterprise endpoints by identifying the malware family, determining infection","domain":"cybersecurity","path":"skills/conducting-malware-incident-response"},{"name":"conducting-man-in-the-middle-attack-simulation","description":"'Simulates man-in-the-middle attacks using Ettercap, mitmproxy, and Bettercap in authorized environments to intercept,","domain":"cybersecurity","path":"skills/conducting-man-in-the-middle-attack-simulation"},{"name":"conducting-memory-forensics-with-volatility","description":"'Performs memory forensics analysis using Volatility 3 to extract evidence of malware execution, process injection,","domain":"cybersecurity","path":"skills/conducting-memory-forensics-with-volatility"},{"name":"conducting-mobile-app-penetration-test","description":"'Conducts penetration testing of iOS and Android mobile applications following the OWASP Mobile Application Security","domain":"cybersecurity","path":"skills/conducting-mobile-app-penetration-test"},{"name":"conducting-network-penetration-test","description":"'Conducts comprehensive network penetration tests against authorized target environments by performing host discovery,","domain":"cybersecurity","path":"skills/conducting-network-penetration-test"},{"name":"conducting-pass-the-ticket-attack","description":"Pass-the-Ticket (PtT) is a lateral movement technique that uses stolen Kerberos tickets (TGT or TGS) to authenticate","domain":"cybersecurity","path":"skills/conducting-pass-the-ticket-attack"},{"name":"conducting-phishing-incident-response","description":"'Responds to phishing incidents by analyzing reported emails, extracting indicators, assessing credential 
compromise,","domain":"cybersecurity","path":"skills/conducting-phishing-incident-response"},{"name":"conducting-post-incident-lessons-learned","description":"Facilitate structured post-incident reviews to identify root causes, document what worked and failed, and produce","domain":"cybersecurity","path":"skills/conducting-post-incident-lessons-learned"},{"name":"conducting-social-engineering-penetration-test","description":"Design and execute a social engineering penetration test including phishing, vishing, smishing, and physical","domain":"cybersecurity","path":"skills/conducting-social-engineering-penetration-test"},{"name":"conducting-social-engineering-pretext-call","description":"Plan and execute authorized vishing (voice phishing) pretext calls to assess employee susceptibility to social","domain":"cybersecurity","path":"skills/conducting-social-engineering-pretext-call"},{"name":"conducting-spearphishing-simulation-campaign","description":"Spearphishing simulation is a targeted social engineering attack vector used by red teams to gain initial access.","domain":"cybersecurity","path":"skills/conducting-spearphishing-simulation-campaign"},{"name":"conducting-wireless-network-penetration-test","description":"'Conducts authorized wireless network penetration tests to assess the security of WiFi infrastructure by testing","domain":"cybersecurity","path":"skills/conducting-wireless-network-penetration-test"},{"name":"configuring-active-directory-tiered-model","description":"Implement Microsoft's Enhanced Security Admin Environment (ESAE) tiered administration model for Active Directory.","domain":"cybersecurity","path":"skills/configuring-active-directory-tiered-model"},{"name":"configuring-aws-verified-access-for-ztna","description":"Configure AWS Verified Access to provide VPN-less zero trust network access to internal applications using 
identity","domain":"cybersecurity","path":"skills/configuring-aws-verified-access-for-ztna"},{"name":"configuring-certificate-authority-with-openssl","description":"A Certificate Authority (CA) is the trust anchor in a PKI hierarchy, responsible for issuing, signing, and revoking","domain":"cybersecurity","path":"skills/configuring-certificate-authority-with-openssl"},{"name":"configuring-host-based-intrusion-detection","description":"'Configures host-based intrusion detection systems (HIDS) to monitor endpoint file integrity, system calls, and","domain":"cybersecurity","path":"skills/configuring-host-based-intrusion-detection"},{"name":"configuring-hsm-for-key-storage","description":"Hardware Security Modules (HSMs) are tamper-resistant physical devices that safeguard cryptographic keys and","domain":"cybersecurity","path":"skills/configuring-hsm-for-key-storage"},{"name":"configuring-identity-aware-proxy-with-google-iap","description":"'Configuring Google Cloud Identity-Aware Proxy (IAP) to enforce per-request identity verification for Compute","domain":"cybersecurity","path":"skills/configuring-identity-aware-proxy-with-google-iap"},{"name":"configuring-ldap-security-hardening","description":"Harden LDAP directory services against common attacks including credential harvesting, LDAP injection, anonymous","domain":"cybersecurity","path":"skills/configuring-ldap-security-hardening"},{"name":"configuring-microsegmentation-for-zero-trust","description":"Configure microsegmentation policies to enforce least-privilege workload-to-workload access using tools like","domain":"cybersecurity","path":"skills/configuring-microsegmentation-for-zero-trust"},{"name":"configuring-multi-factor-authentication-with-duo","description":"Deploy Cisco Duo multi-factor authentication across enterprise applications, VPN, RDP, and SSH access 
points.","domain":"cybersecurity","path":"skills/configuring-multi-factor-authentication-with-duo"},{"name":"configuring-network-segmentation-with-vlans","description":"'Designs and implements VLAN-based network segmentation on managed switches to isolate network zones, enforce","domain":"cybersecurity","path":"skills/configuring-network-segmentation-with-vlans"},{"name":"configuring-oauth2-authorization-flow","description":"Configure secure OAuth 2.0 authorization flows including Authorization Code with PKCE, Client Credentials, and","domain":"cybersecurity","path":"skills/configuring-oauth2-authorization-flow"},{"name":"configuring-pfsense-firewall-rules","description":"'Configures pfSense firewall rules, NAT policies, VPN tunnels, and traffic shaping to enforce network segmentation,","domain":"cybersecurity","path":"skills/configuring-pfsense-firewall-rules"},{"name":"configuring-snort-ids-for-intrusion-detection","description":"'Installs, configures, and tunes Snort 3 intrusion detection system to monitor network traffic for malicious","domain":"cybersecurity","path":"skills/configuring-snort-ids-for-intrusion-detection"},{"name":"configuring-suricata-for-network-monitoring","description":"'Deploys and configures Suricata IDS/IPS with Emerging Threats rulesets, EVE JSON logging, and custom rules for","domain":"cybersecurity","path":"skills/configuring-suricata-for-network-monitoring"},{"name":"configuring-tls-1-3-for-secure-communications","description":"TLS 1.3 (RFC 8446) is the latest version of the Transport Layer Security protocol, providing significant improvements","domain":"cybersecurity","path":"skills/configuring-tls-1-3-for-secure-communications"},{"name":"configuring-windows-defender-advanced-settings","description":"'Configures Microsoft Defender for Endpoint (MDE) advanced protection settings including attack surface 
reduction","domain":"cybersecurity","path":"skills/configuring-windows-defender-advanced-settings"},{"name":"configuring-windows-event-logging-for-detection","description":"'Configures Windows Event Logging with advanced audit policies to generate high-fidelity security events for","domain":"cybersecurity","path":"skills/configuring-windows-event-logging-for-detection"},{"name":"configuring-zscaler-private-access-for-ztna","description":"'Configuring Zscaler Private Access (ZPA) to replace traditional VPN with zero trust network access by deploying","domain":"cybersecurity","path":"skills/configuring-zscaler-private-access-for-ztna"},{"name":"containing-active-breach","description":"'Executes containment strategies to stop active adversary operations and prevent lateral movement during a confirmed","domain":"cybersecurity","path":"skills/containing-active-breach"},{"name":"correlating-security-events-in-qradar","description":"'Correlates security events in IBM QRadar SIEM using AQL (Ariel Query Language), custom rules, building blocks,","domain":"cybersecurity","path":"skills/correlating-security-events-in-qradar"},{"name":"correlating-threat-campaigns","description":"'Correlates disparate security incidents, IOCs, and adversary behaviors across time and organizations to identify","domain":"cybersecurity","path":"skills/correlating-threat-campaigns"},{"name":"deobfuscating-javascript-malware","description":"'Deobfuscates malicious JavaScript code used in web-based attacks, phishing pages, and dropper scripts by reversing","domain":"cybersecurity","path":"skills/deobfuscating-javascript-malware"},{"name":"deobfuscating-powershell-obfuscated-malware","description":"Systematically deobfuscate multi-layer PowerShell malware using AST analysis, dynamic tracing, and tools like","domain":"cybersecurity","path":"skills/deobfuscating-powershell-obfuscated-malware"},{"name":"deploying-active-directory-honeytokens","description":"'Deploys deception-based honeytokens in Active 
Directory including fake privileged accounts with AdminCount=1,","domain":"cybersecurity","path":"skills/deploying-active-directory-honeytokens"},{"name":"deploying-cloudflare-access-for-zero-trust","description":"'Deploying Cloudflare Access with Cloudflare Tunnel to provide zero trust access to self-hosted and private applications,","domain":"cybersecurity","path":"skills/deploying-cloudflare-access-for-zero-trust"},{"name":"deploying-decoy-files-for-ransomware-detection","description":"'Deploys canary files (honeytokens) across file systems to detect ransomware encryption activity in real time.","domain":"cybersecurity","path":"skills/deploying-decoy-files-for-ransomware-detection"},{"name":"deploying-edr-agent-with-crowdstrike","description":"'Deploys and configures CrowdStrike Falcon EDR agents across enterprise endpoints to enable real-time threat","domain":"cybersecurity","path":"skills/deploying-edr-agent-with-crowdstrike"},{"name":"deploying-osquery-for-endpoint-monitoring","description":"'Deploys and configures osquery for real-time endpoint monitoring using SQL-based queries to inspect running","domain":"cybersecurity","path":"skills/deploying-osquery-for-endpoint-monitoring"},{"name":"deploying-palo-alto-prisma-access-zero-trust","description":"'Deploying Palo Alto Networks Prisma Access for SASE-based zero trust network access using GlobalProtect agents,","domain":"cybersecurity","path":"skills/deploying-palo-alto-prisma-access-zero-trust"},{"name":"deploying-ransomware-canary-files","description":"'Deploys and monitors ransomware canary files across critical directories using Python''s watchdog library for","domain":"cybersecurity","path":"skills/deploying-ransomware-canary-files"},{"name":"deploying-software-defined-perimeter","description":"Deploy a Software-Defined Perimeter using the CSA v2.0 specification with Single Packet Authorization, 
mutual","domain":"cybersecurity","path":"skills/deploying-software-defined-perimeter"},{"name":"deploying-tailscale-for-zero-trust-vpn","description":"Deploy and configure Tailscale as a WireGuard-based zero trust mesh VPN with identity-aware access controls,","domain":"cybersecurity","path":"skills/deploying-tailscale-for-zero-trust-vpn"},{"name":"detecting-ai-model-prompt-injection-attacks","description":"'Detects prompt injection attacks targeting LLM-based applications using a multi-layered defense combining regex","domain":"cybersecurity","path":"skills/detecting-ai-model-prompt-injection-attacks"},{"name":"detecting-anomalies-in-industrial-control-systems","description":"'This skill covers deploying anomaly detection systems for industrial control environments using machine learning","domain":"cybersecurity","path":"skills/detecting-anomalies-in-industrial-control-systems"},{"name":"detecting-anomalous-authentication-patterns","description":"'Detects anomalous authentication patterns using UEBA analytics, statistical baselines, and machine learning","domain":"cybersecurity","path":"skills/detecting-anomalous-authentication-patterns"},{"name":"detecting-api-enumeration-attacks","description":"Detect and prevent API enumeration attacks including BOLA and IDOR exploitation by monitoring sequential identifier","domain":"cybersecurity","path":"skills/detecting-api-enumeration-attacks"},{"name":"detecting-arp-poisoning-in-network-traffic","description":"Detect and prevent ARP spoofing attacks using ARPWatch, Dynamic ARP Inspection, Wireshark analysis, and custom","domain":"cybersecurity","path":"skills/detecting-arp-poisoning-in-network-traffic"},{"name":"detecting-attacks-on-historian-servers","description":"'Detect cyber attacks targeting OT historian servers (OSIsoft PI, Ignition, Wonderware) that sit at the IT/OT","domain":"cybersecurity","path":"skills/detecting-attacks-on-historian-servers"},{"name":"detecting-attacks-on-scada-systems","description":"'This 
skill covers detecting cyber attacks targeting Supervisory Control and Data Acquisition (SCADA) systems","domain":"cybersecurity","path":"skills/detecting-attacks-on-scada-systems"},{"name":"detecting-aws-cloudtrail-anomalies","description":"Detect unusual API call patterns in AWS CloudTrail logs using boto3, statistical baselining, and behavioral analysis","domain":"cybersecurity","path":"skills/detecting-aws-cloudtrail-anomalies"},{"name":"detecting-aws-credential-exposure-with-trufflehog","description":"'Detecting exposed AWS credentials in source code repositories, CI/CD pipelines, and configuration files using","domain":"cybersecurity","path":"skills/detecting-aws-credential-exposure-with-trufflehog"},{"name":"detecting-aws-guardduty-findings-automation","description":"Automate AWS GuardDuty threat detection findings processing using EventBridge and Lambda to enable real-time","domain":"cybersecurity","path":"skills/detecting-aws-guardduty-findings-automation"},{"name":"detecting-aws-iam-privilege-escalation","description":"Detect AWS IAM privilege escalation paths using boto3 and Cloudsplaining policy analysis to identify overly permissive","domain":"cybersecurity","path":"skills/detecting-aws-iam-privilege-escalation"},{"name":"detecting-azure-lateral-movement","description":"Detect lateral movement in Azure AD/Entra ID environments using Microsoft Graph API audit logs, Azure Sentinel","domain":"cybersecurity","path":"skills/detecting-azure-lateral-movement"},{"name":"detecting-azure-service-principal-abuse","description":"Detect and investigate Azure service principal abuse including privilege escalation, credential compromise, admin","domain":"cybersecurity","path":"skills/detecting-azure-service-principal-abuse"},{"name":"detecting-azure-storage-account-misconfigurations","description":"Audit Azure Blob and ADLS storage accounts for public access exposure, weak or long-lived SAS tokens, 
missing","domain":"cybersecurity","path":"skills/detecting-azure-storage-account-misconfigurations"},{"name":"detecting-beaconing-patterns-with-zeek","description":"'Performs statistical analysis of Zeek conn.log connection intervals to detect C2 beaconing patterns. Uses the","domain":"cybersecurity","path":"skills/detecting-beaconing-patterns-with-zeek"},{"name":"detecting-bluetooth-low-energy-attacks","description":"'Detects and analyzes Bluetooth Low Energy (BLE) security attacks including sniffing, replay attacks, GATT enumeration","domain":"cybersecurity","path":"skills/detecting-bluetooth-low-energy-attacks"},{"name":"detecting-broken-object-property-level-authorization","description":"Detect and test for OWASP API3:2023 Broken Object Property Level Authorization vulnerabilities including excessive","domain":"cybersecurity","path":"skills/detecting-broken-object-property-level-authorization"},{"name":"detecting-business-email-compromise","description":"Business Email Compromise (BEC) is a sophisticated fraud scheme where attackers impersonate executives, vendors,","domain":"cybersecurity","path":"skills/detecting-business-email-compromise"},{"name":"detecting-business-email-compromise-with-ai","description":"Deploy AI and NLP-powered detection systems to identify business email compromise attacks by analyzing writing","domain":"cybersecurity","path":"skills/detecting-business-email-compromise-with-ai"},{"name":"detecting-cloud-threats-with-guardduty","description":"'This skill teaches security teams how to deploy and operationalize Amazon GuardDuty for continuous threat detection","domain":"cybersecurity","path":"skills/detecting-cloud-threats-with-guardduty"},{"name":"detecting-command-and-control-over-dns","description":"'Detects command-and-control (C2) communications tunneled through DNS protocol including DNS tunneling 
tools","domain":"cybersecurity","path":"skills/detecting-command-and-control-over-dns"},{"name":"detecting-compromised-cloud-credentials","description":"'Detecting compromised cloud credentials across AWS, Azure, and GCP by analyzing anomalous API activity, impossible","domain":"cybersecurity","path":"skills/detecting-compromised-cloud-credentials"},{"name":"detecting-container-drift-at-runtime","description":"Detect unauthorized modifications to running containers by monitoring for binary execution drift, file system","domain":"cybersecurity","path":"skills/detecting-container-drift-at-runtime"},{"name":"detecting-container-escape-attempts","description":"Container escape is a critical attack technique where an adversary breaks out of container isolation to access","domain":"cybersecurity","path":"skills/detecting-container-escape-attempts"},{"name":"detecting-container-escape-with-falco-rules","description":"Detect container escape attempts in real-time using Falco runtime security rules that monitor syscalls, file","domain":"cybersecurity","path":"skills/detecting-container-escape-with-falco-rules"},{"name":"detecting-credential-dumping-techniques","description":"Detect LSASS credential dumping, SAM database extraction, and NTDS.dit theft using Sysmon Event ID 10, Windows","domain":"cybersecurity","path":"skills/detecting-credential-dumping-techniques"},{"name":"detecting-cryptomining-in-cloud","description":"'This skill teaches security teams how to detect and respond to unauthorized cryptocurrency mining operations","domain":"cybersecurity","path":"skills/detecting-cryptomining-in-cloud"},{"name":"detecting-dcsync-attack-in-active-directory","description":"Detect DCSync attacks where adversaries abuse Active Directory replication privileges to extract password hashes","domain":"cybersecurity","path":"skills/detecting-dcsync-attack-in-active-directory"},{"name":"detecting-deepfake-audio-in-vishing-attacks","description":"'Detects AI-generated deepfake audio 
used in voice phishing (vishing) attacks by extracting spectral features","domain":"cybersecurity","path":"skills/detecting-deepfake-audio-in-vishing-attacks"},{"name":"detecting-dll-sideloading-attacks","description":"Detect DLL side-loading attacks where adversaries place malicious DLLs alongside legitimate applications to hijack","domain":"cybersecurity","path":"skills/detecting-dll-sideloading-attacks"},{"name":"detecting-dnp3-protocol-anomalies","description":"'Detect anomalies in DNP3 (Distributed Network Protocol 3) communications used in SCADA systems by monitoring","domain":"cybersecurity","path":"skills/detecting-dnp3-protocol-anomalies"},{"name":"detecting-dns-exfiltration-with-dns-query-analysis","description":"Detect data exfiltration through DNS tunneling by analyzing query entropy, subdomain length, query volume, TXT","domain":"cybersecurity","path":"skills/detecting-dns-exfiltration-with-dns-query-analysis"},{"name":"detecting-email-account-compromise","description":"Detect compromised O365 and Google Workspace email accounts by analyzing inbox rule creation, suspicious sign-in","domain":"cybersecurity","path":"skills/detecting-email-account-compromise"},{"name":"detecting-email-forwarding-rules-attack","description":"Detect malicious email forwarding rules created by adversaries to maintain persistent access to email communications","domain":"cybersecurity","path":"skills/detecting-email-forwarding-rules-attack"},{"name":"detecting-evasion-techniques-in-endpoint-logs","description":"'Detects defense evasion techniques used by adversaries in endpoint logs including log tampering, timestomping,","domain":"cybersecurity","path":"skills/detecting-evasion-techniques-in-endpoint-logs"},{"name":"detecting-exfiltration-over-dns-with-zeek","description":"Detect DNS-based data exfiltration by analyzing Zeek dns.log for high-entropy subdomains and anomalous 
query","domain":"cybersecurity","path":"skills/detecting-exfiltration-over-dns-with-zeek"},{"name":"detecting-fileless-attacks-on-endpoints","description":"'Detects fileless malware and in-memory attacks that execute entirely in RAM without writing persistent files","domain":"cybersecurity","path":"skills/detecting-fileless-attacks-on-endpoints"},{"name":"detecting-fileless-malware-techniques","description":"'Detects and analyzes fileless malware that operates entirely in memory using PowerShell, WMI, .NET reflection,","domain":"cybersecurity","path":"skills/detecting-fileless-malware-techniques"},{"name":"detecting-golden-ticket-attacks-in-kerberos-logs","description":"Detect Golden Ticket attacks in Active Directory by analyzing Kerberos TGT anomalies including mismatched encryption","domain":"cybersecurity","path":"skills/detecting-golden-ticket-attacks-in-kerberos-logs"},{"name":"detecting-golden-ticket-forgery","description":"Detect Kerberos Golden Ticket forgery by analyzing Windows Event ID 4769 for RC4 encryption downgrades (0x17),","domain":"cybersecurity","path":"skills/detecting-golden-ticket-forgery"},{"name":"detecting-insider-data-exfiltration-via-dlp","description":"'Detects insider data exfiltration by analyzing DLP policy violations, file access patterns, upload volume anomalies,","domain":"cybersecurity","path":"skills/detecting-insider-data-exfiltration-via-dlp"},{"name":"detecting-insider-threat-behaviors","description":"Detect insider threat behavioral indicators including unusual data access, off-hours activity, mass file downloads,","domain":"cybersecurity","path":"skills/detecting-insider-threat-behaviors"},{"name":"detecting-insider-threat-with-ueba","description":"Implement User and Entity Behavior Analytics using Elasticsearch/OpenSearch to build behavioral baselines, calculate","domain":"cybersecurity","path":"skills/detecting-insider-threat-with-ueba"},{"name":"detecting-kerberoasting-attacks","description":"Detect Kerberoasting attacks 
by monitoring for anomalous Kerberos TGS requests targeting service accounts with","domain":"cybersecurity","path":"skills/detecting-kerberoasting-attacks"},{"name":"detecting-lateral-movement-in-network","description":"'Identifies lateral movement techniques in enterprise networks by analyzing authentication logs, network flows,","domain":"cybersecurity","path":"skills/detecting-lateral-movement-in-network"},{"name":"detecting-lateral-movement-with-splunk","description":"Detect adversary lateral movement across networks using Splunk SPL queries against Windows authentication logs,","domain":"cybersecurity","path":"skills/detecting-lateral-movement-with-splunk"},{"name":"detecting-lateral-movement-with-zeek","description":"'Detect lateral movement in network traffic using Zeek (formerly Bro) log analysis. Parses conn.log, smb_mapping.log,","domain":"cybersecurity","path":"skills/detecting-lateral-movement-with-zeek"},{"name":"detecting-living-off-the-land-attacks","description":"'Detect abuse of legitimate Windows binaries (LOLBins) used for living off the land attacks. 
Monitors process","domain":"cybersecurity","path":"skills/detecting-living-off-the-land-attacks"},{"name":"detecting-living-off-the-land-with-lolbas","description":"Detect Living Off the Land Binaries (LOLBins/LOLBAS) abuse including certutil, regsvr32, mshta, and rundll32","domain":"cybersecurity","path":"skills/detecting-living-off-the-land-with-lolbas"},{"name":"detecting-malicious-scheduled-tasks-with-sysmon","description":"'Detect malicious scheduled task creation and modification using Sysmon Event IDs 1 (Process Create for schtasks.exe),","domain":"cybersecurity","path":"skills/detecting-malicious-scheduled-tasks-with-sysmon"},{"name":"detecting-mimikatz-execution-patterns","description":"Detect Mimikatz execution through command-line patterns, LSASS access signatures, binary indicators, and in-memory","domain":"cybersecurity","path":"skills/detecting-mimikatz-execution-patterns"},{"name":"detecting-misconfigured-azure-storage","description":"'Detecting misconfigured Azure Storage accounts including publicly accessible blob containers, missing encryption","domain":"cybersecurity","path":"skills/detecting-misconfigured-azure-storage"},{"name":"detecting-mobile-malware-behavior","description":"'Detects and analyzes malicious behavior in mobile applications through behavioral analysis, permission abuse","domain":"cybersecurity","path":"skills/detecting-mobile-malware-behavior"},{"name":"detecting-modbus-command-injection-attacks","description":"'Detect command injection attacks against Modbus TCP/RTU protocol in ICS environments by monitoring for unauthorized","domain":"cybersecurity","path":"skills/detecting-modbus-command-injection-attacks"},{"name":"detecting-modbus-protocol-anomalies","description":"'This skill covers detecting anomalies in Modbus/TCP and Modbus RTU communications in industrial control 
systems.","domain":"cybersecurity","path":"skills/detecting-modbus-protocol-anomalies"},{"name":"detecting-network-anomalies-with-zeek","description":"'Deploys and configures Zeek (formerly Bro) network security monitor to passively analyze network traffic, generate","domain":"cybersecurity","path":"skills/detecting-network-anomalies-with-zeek"},{"name":"detecting-network-scanning-with-ids-signatures","description":"Detect network reconnaissance and port scanning using Suricata and Snort IDS signatures, threshold-based detection","domain":"cybersecurity","path":"skills/detecting-network-scanning-with-ids-signatures"},{"name":"detecting-ntlm-relay-with-event-correlation","description":"'Detect NTLM relay attacks through Windows Security Event correlation by analyzing Event 4624 LogonType 3 for","domain":"cybersecurity","path":"skills/detecting-ntlm-relay-with-event-correlation"},{"name":"detecting-oauth-token-theft","description":"'Detects and responds to OAuth token theft and replay attacks in cloud environments, focusing on Microsoft Entra","domain":"cybersecurity","path":"skills/detecting-oauth-token-theft"},{"name":"detecting-pass-the-hash-attacks","description":"Detect Pass-the-Hash attacks by analyzing NTLM authentication patterns, identifying Type 3 logons with NTLM where","domain":"cybersecurity","path":"skills/detecting-pass-the-hash-attacks"},{"name":"detecting-pass-the-ticket-attacks","description":"Detect Kerberos Pass-the-Ticket (PtT) attacks by analyzing Windows Event IDs 4768, 4769, and 4771 for anomalous","domain":"cybersecurity","path":"skills/detecting-pass-the-ticket-attacks"},{"name":"detecting-port-scanning-with-fail2ban","description":"'Configures Fail2ban with custom filters and actions to detect port scanning activity, SSH brute force attempts,","domain":"cybersecurity","path":"skills/detecting-port-scanning-with-fail2ban"},{"name":"detecting-privilege-escalation-attempts","description":"Detect privilege escalation attempts including token 
manipulation, UAC bypass, unquoted service paths, kernel","domain":"cybersecurity","path":"skills/detecting-privilege-escalation-attempts"},{"name":"detecting-privilege-escalation-in-kubernetes-pods","description":"Detect and prevent privilege escalation in Kubernetes pods by monitoring security contexts, capabilities, and","domain":"cybersecurity","path":"skills/detecting-privilege-escalation-in-kubernetes-pods"},{"name":"detecting-process-hollowing-technique","description":"Detect process hollowing (T1055.012) by analyzing memory-mapped sections, hollowed process indicators, and parent-child","domain":"cybersecurity","path":"skills/detecting-process-hollowing-technique"},{"name":"detecting-process-injection-techniques","description":"'Detects and analyzes process injection techniques used by malware including classic DLL injection, process hollowing,","domain":"cybersecurity","path":"skills/detecting-process-injection-techniques"},{"name":"detecting-qr-code-phishing-with-email-security","description":"Detect and prevent QR code phishing (quishing) attacks that bypass traditional email security by embedding malicious","domain":"cybersecurity","path":"skills/detecting-qr-code-phishing-with-email-security"},{"name":"detecting-ransomware-encryption-behavior","description":"'Detects ransomware encryption activity in real time using entropy analysis, file system I/O monitoring, and","domain":"cybersecurity","path":"skills/detecting-ransomware-encryption-behavior"},{"name":"detecting-ransomware-precursors-in-network","description":"'Detects early-stage ransomware indicators in network traffic before encryption begins, including initial access","domain":"cybersecurity","path":"skills/detecting-ransomware-precursors-in-network"},{"name":"detecting-rdp-brute-force-attacks","description":"Detect RDP brute force attacks by analyzing Windows Security Event Logs for failed authentication patterns 
(Event","domain":"cybersecurity","path":"skills/detecting-rdp-brute-force-attacks"},{"name":"detecting-rootkit-activity","description":"'Detects rootkit presence on compromised systems by identifying hidden processes, hooked system calls, modified","domain":"cybersecurity","path":"skills/detecting-rootkit-activity"},{"name":"detecting-s3-data-exfiltration-attempts","description":"'Detecting data exfiltration attempts from AWS S3 buckets by analyzing CloudTrail S3 data events, VPC Flow Logs,","domain":"cybersecurity","path":"skills/detecting-s3-data-exfiltration-attempts"},{"name":"detecting-serverless-function-injection","description":"'Detects and prevents code injection attacks targeting serverless functions (AWS Lambda, Azure Functions, Google","domain":"cybersecurity","path":"skills/detecting-serverless-function-injection"},{"name":"detecting-service-account-abuse","description":"Detect abuse of service accounts through anomalous interactive logons, privilege escalation, lateral movement,","domain":"cybersecurity","path":"skills/detecting-service-account-abuse"},{"name":"detecting-shadow-api-endpoints","description":"Discover and inventory shadow API endpoints that operate outside documented specifications using traffic analysis,","domain":"cybersecurity","path":"skills/detecting-shadow-api-endpoints"},{"name":"detecting-shadow-it-cloud-usage","description":"Detect unauthorized SaaS and cloud service usage (shadow IT) by analyzing proxy logs, DNS query logs, and netflow","domain":"cybersecurity","path":"skills/detecting-shadow-it-cloud-usage"},{"name":"detecting-spearphishing-with-email-gateway","description":"Spearphishing targets specific individuals using personalized, researched content that bypasses generic spam","domain":"cybersecurity","path":"skills/detecting-spearphishing-with-email-gateway"},{"name":"detecting-sql-injection-via-waf-logs","description":"Analyze WAF (ModSecurity/AWS WAF/Cloudflare) logs to detect SQL injection attack campaigns. 
Parses ModSecurity","domain":"cybersecurity","path":"skills/detecting-sql-injection-via-waf-logs"},{"name":"detecting-stuxnet-style-attacks","description":"'This skill covers detecting sophisticated cyber-physical attacks that follow the Stuxnet attack pattern of modifying","domain":"cybersecurity","path":"skills/detecting-stuxnet-style-attacks"},{"name":"detecting-supply-chain-attacks-in-ci-cd","description":"'Scans GitHub Actions workflows and CI/CD pipeline configurations for supply chain attack vectors including unpinned","domain":"cybersecurity","path":"skills/detecting-supply-chain-attacks-in-ci-cd"},{"name":"detecting-suspicious-oauth-application-consent","description":"Detect risky OAuth application consent grants in Azure AD / Microsoft Entra ID using Microsoft Graph API, audit","domain":"cybersecurity","path":"skills/detecting-suspicious-oauth-application-consent"},{"name":"detecting-suspicious-powershell-execution","description":"Detect suspicious PowerShell execution patterns including encoded commands, download cradles, AMSI bypass attempts,","domain":"cybersecurity","path":"skills/detecting-suspicious-powershell-execution"},{"name":"detecting-t1003-credential-dumping-with-edr","description":"Detect OS credential dumping techniques targeting LSASS memory, SAM database, NTDS.dit, and cached credentials","domain":"cybersecurity","path":"skills/detecting-t1003-credential-dumping-with-edr"},{"name":"detecting-t1055-process-injection-with-sysmon","description":"Detect process injection techniques (T1055) including classic DLL injection, process hollowing, and APC injection","domain":"cybersecurity","path":"skills/detecting-t1055-process-injection-with-sysmon"},{"name":"detecting-t1548-abuse-elevation-control-mechanism","description":"Detect abuse of elevation control mechanisms including UAC bypass, sudo exploitation, and setuid/setgid 
manipulation","domain":"cybersecurity","path":"skills/detecting-t1548-abuse-elevation-control-mechanism"},{"name":"detecting-typosquatting-packages-in-npm-pypi","description":"'Detects typosquatting attacks in npm and PyPI package registries by analyzing package name similarity using","domain":"cybersecurity","path":"skills/detecting-typosquatting-packages-in-npm-pypi"},{"name":"detecting-wmi-persistence","description":"Detect WMI event subscription persistence by analyzing Sysmon Event IDs 19, 20, and 21 for malicious EventFilter,","domain":"cybersecurity","path":"skills/detecting-wmi-persistence"},{"name":"eradicating-malware-from-infected-systems","description":"Systematically remove malware, backdoors, and attacker persistence mechanisms from infected systems while ensuring","domain":"cybersecurity","path":"skills/eradicating-malware-from-infected-systems"},{"name":"evaluating-threat-intelligence-platforms","description":"'Evaluates and selects Threat Intelligence Platform (TIP) products based on organizational requirements including","domain":"cybersecurity","path":"skills/evaluating-threat-intelligence-platforms"},{"name":"executing-active-directory-attack-simulation","description":"'Executes authorized attack simulations against Active Directory environments to identify misconfigurations,","domain":"cybersecurity","path":"skills/executing-active-directory-attack-simulation"},{"name":"executing-phishing-simulation-campaign","description":"'Executes authorized phishing simulation campaigns to assess an organization''s susceptibility to email-based","domain":"cybersecurity","path":"skills/executing-phishing-simulation-campaign"},{"name":"executing-red-team-engagement-planning","description":"Red team engagement planning is the foundational phase that defines scope, objectives, rules of engagement (ROE),","domain":"cybersecurity","path":"skills/executing-red-team-engagement-planning"},{"name":"executing-red-team-exercise","description":"'Executes comprehensive 
red team exercises that simulate real-world adversary operations against an organization''s","domain":"cybersecurity","path":"skills/executing-red-team-exercise"},{"name":"exploiting-active-directory-certificate-services-esc1","description":"Exploit misconfigured Active Directory Certificate Services (AD CS) ESC1 vulnerability to request certificates","domain":"cybersecurity","path":"skills/exploiting-active-directory-certificate-services-esc1"},{"name":"exploiting-active-directory-with-bloodhound","description":"BloodHound is a graph-based Active Directory reconnaissance tool that uses graph theory to reveal hidden and","domain":"cybersecurity","path":"skills/exploiting-active-directory-with-bloodhound"},{"name":"exploiting-api-injection-vulnerabilities","description":"'Tests APIs for injection vulnerabilities including SQL injection, NoSQL injection, OS command injection, LDAP","domain":"cybersecurity","path":"skills/exploiting-api-injection-vulnerabilities"},{"name":"exploiting-bgp-hijacking-vulnerabilities","description":"'Analyzes and simulates BGP hijacking scenarios in authorized lab environments to assess route origin validation,","domain":"cybersecurity","path":"skills/exploiting-bgp-hijacking-vulnerabilities"},{"name":"exploiting-broken-function-level-authorization","description":"'Tests APIs for Broken Function Level Authorization (BFLA) vulnerabilities where regular users can invoke administrative","domain":"cybersecurity","path":"skills/exploiting-broken-function-level-authorization"},{"name":"exploiting-broken-link-hijacking","description":"Discover and exploit broken link hijacking vulnerabilities by identifying references to expired domains, decommissioned","domain":"cybersecurity","path":"skills/exploiting-broken-link-hijacking"},{"name":"exploiting-constrained-delegation-abuse","description":"Exploit Kerberos Constrained Delegation misconfigurations in Active Directory to impersonate privileged 
users","domain":"cybersecurity","path":"skills/exploiting-constrained-delegation-abuse"},{"name":"exploiting-deeplink-vulnerabilities","description":"'Tests and exploits deep link (URL scheme and App Link) vulnerabilities in Android and iOS mobile applications","domain":"cybersecurity","path":"skills/exploiting-deeplink-vulnerabilities"},{"name":"exploiting-excessive-data-exposure-in-api","description":"'Tests APIs for excessive data exposure where endpoints return more data than the client application needs, relying","domain":"cybersecurity","path":"skills/exploiting-excessive-data-exposure-in-api"},{"name":"exploiting-http-request-smuggling","description":"Detecting and exploiting HTTP request smuggling vulnerabilities caused by Content-Length and Transfer-Encoding","domain":"cybersecurity","path":"skills/exploiting-http-request-smuggling"},{"name":"exploiting-idor-vulnerabilities","description":"Identifying and exploiting Insecure Direct Object Reference vulnerabilities to access unauthorized resources","domain":"cybersecurity","path":"skills/exploiting-idor-vulnerabilities"},{"name":"exploiting-insecure-data-storage-in-mobile","description":"'Identifies and exploits insecure local data storage vulnerabilities in Android and iOS mobile applications including","domain":"cybersecurity","path":"skills/exploiting-insecure-data-storage-in-mobile"},{"name":"exploiting-insecure-deserialization","description":"Identifying and exploiting insecure deserialization vulnerabilities in Java, PHP, Python, and .NET applications","domain":"cybersecurity","path":"skills/exploiting-insecure-deserialization"},{"name":"exploiting-ipv6-vulnerabilities","description":"'Identifies and exploits IPv6-specific vulnerabilities including SLAAC spoofing, Router Advertisement flooding,","domain":"cybersecurity","path":"skills/exploiting-ipv6-vulnerabilities"},{"name":"exploiting-jwt-algorithm-confusion-attack","description":"'Exploits JWT algorithm confusion vulnerabilities where the 
server''s token verification library accepts the","domain":"cybersecurity","path":"skills/exploiting-jwt-algorithm-confusion-attack"},{"name":"exploiting-kerberoasting-with-impacket","description":"Perform Kerberoasting attacks using Impacket's GetUserSPNs to extract and crack Kerberos TGS tickets for Active","domain":"cybersecurity","path":"skills/exploiting-kerberoasting-with-impacket"},{"name":"exploiting-mass-assignment-in-rest-apis","description":"Discover and exploit mass assignment vulnerabilities in REST APIs to escalate privileges, modify restricted fields,","domain":"cybersecurity","path":"skills/exploiting-mass-assignment-in-rest-apis"},{"name":"exploiting-ms17-010-eternalblue-vulnerability","description":"MS17-010 (EternalBlue) is a critical vulnerability in Microsoft's SMBv1 implementation that allows remote code","domain":"cybersecurity","path":"skills/exploiting-ms17-010-eternalblue-vulnerability"},{"name":"exploiting-nopac-cve-2021-42278-42287","description":"Exploit the noPac vulnerability chain (CVE-2021-42278 sAMAccountName spoofing and CVE-2021-42287 KDC PAC confusion)","domain":"cybersecurity","path":"skills/exploiting-nopac-cve-2021-42278-42287"},{"name":"exploiting-nosql-injection-vulnerabilities","description":"Detect and exploit NoSQL injection vulnerabilities in MongoDB, CouchDB, and other NoSQL databases to demonstrate","domain":"cybersecurity","path":"skills/exploiting-nosql-injection-vulnerabilities"},{"name":"exploiting-oauth-misconfiguration","description":"Identifying and exploiting OAuth 2.0 and OpenID Connect misconfigurations including redirect URI manipulation,","domain":"cybersecurity","path":"skills/exploiting-oauth-misconfiguration"},{"name":"exploiting-prototype-pollution-in-javascript","description":"Detect and exploit JavaScript prototype pollution vulnerabilities on both client-side and server-side 
applications","domain":"cybersecurity","path":"skills/exploiting-prototype-pollution-in-javascript"},{"name":"exploiting-race-condition-vulnerabilities","description":"Detect and exploit race condition vulnerabilities in web applications using Turbo Intruder's single-packet attack","domain":"cybersecurity","path":"skills/exploiting-race-condition-vulnerabilities"},{"name":"exploiting-server-side-request-forgery","description":"Identifying and exploiting SSRF vulnerabilities to access internal services, cloud metadata, and restricted network","domain":"cybersecurity","path":"skills/exploiting-server-side-request-forgery"},{"name":"exploiting-smb-vulnerabilities-with-metasploit","description":"'Identifies and exploits SMB protocol vulnerabilities using Metasploit Framework during authorized penetration","domain":"cybersecurity","path":"skills/exploiting-smb-vulnerabilities-with-metasploit"},{"name":"exploiting-sql-injection-vulnerabilities","description":"'Identifies and exploits SQL injection vulnerabilities in web applications during authorized penetration tests","domain":"cybersecurity","path":"skills/exploiting-sql-injection-vulnerabilities"},{"name":"exploiting-sql-injection-with-sqlmap","description":"Detecting and exploiting SQL injection vulnerabilities using sqlmap to extract database contents during authorized","domain":"cybersecurity","path":"skills/exploiting-sql-injection-with-sqlmap"},{"name":"exploiting-template-injection-vulnerabilities","description":"Detecting and exploiting Server-Side Template Injection (SSTI) vulnerabilities across Jinja2, Twig, Freemarker,","domain":"cybersecurity","path":"skills/exploiting-template-injection-vulnerabilities"},{"name":"exploiting-type-juggling-vulnerabilities","description":"Exploit PHP type juggling vulnerabilities caused by loose comparison operators to bypass authentication, 
circumvent","domain":"cybersecurity","path":"skills/exploiting-type-juggling-vulnerabilities"},{"name":"exploiting-vulnerabilities-with-metasploit-framework","description":"The Metasploit Framework is the world's most widely used penetration testing platform, maintained by Rapid7.","domain":"cybersecurity","path":"skills/exploiting-vulnerabilities-with-metasploit-framework"},{"name":"exploiting-websocket-vulnerabilities","description":"Testing WebSocket implementations for authentication bypass, cross-site hijacking, injection attacks, and insecure","domain":"cybersecurity","path":"skills/exploiting-websocket-vulnerabilities"},{"name":"exploiting-zerologon-vulnerability-cve-2020-1472","description":"Exploit the Zerologon vulnerability (CVE-2020-1472) in the Netlogon Remote Protocol to achieve domain controller","domain":"cybersecurity","path":"skills/exploiting-zerologon-vulnerability-cve-2020-1472"},{"name":"extracting-browser-history-artifacts","description":"Extract and analyze browser history, cookies, cache, downloads, and bookmarks from Chrome, Firefox, and Edge","domain":"cybersecurity","path":"skills/extracting-browser-history-artifacts"},{"name":"extracting-config-from-agent-tesla-rat","description":"Extract embedded configuration from Agent Tesla RAT samples including SMTP/FTP/Telegram exfiltration credentials,","domain":"cybersecurity","path":"skills/extracting-config-from-agent-tesla-rat"},{"name":"extracting-credentials-from-memory-dump","description":"Extract cached credentials, password hashes, Kerberos tickets, and authentication tokens from memory dumps using","domain":"cybersecurity","path":"skills/extracting-credentials-from-memory-dump"},{"name":"extracting-iocs-from-malware-samples","description":"'Extracts indicators of compromise (IOCs) from malware samples including file hashes, network indicators 
(IPs,","domain":"cybersecurity","path":"skills/extracting-iocs-from-malware-samples"},{"name":"extracting-memory-artifacts-with-rekall","description":"'Uses Rekall memory forensics framework to analyze memory dumps for process hollowing, injected code via VAD","domain":"cybersecurity","path":"skills/extracting-memory-artifacts-with-rekall"},{"name":"extracting-windows-event-logs-artifacts","description":"Extract, parse, and analyze Windows Event Logs (EVTX) using Chainsaw, Hayabusa, and EvtxECmd to detect lateral","domain":"cybersecurity","path":"skills/extracting-windows-event-logs-artifacts"},{"name":"generating-threat-intelligence-reports","description":"'Generates structured cyber threat intelligence reports at strategic, operational, and tactical levels tailored","domain":"cybersecurity","path":"skills/generating-threat-intelligence-reports"},{"name":"hardening-docker-containers-for-production","description":"Hardening Docker containers for production involves applying security best practices aligned with CIS Docker","domain":"cybersecurity","path":"skills/hardening-docker-containers-for-production"},{"name":"hardening-docker-daemon-configuration","description":"Harden the Docker daemon by configuring daemon.json with user namespace remapping, TLS authentication, rootless","domain":"cybersecurity","path":"skills/hardening-docker-daemon-configuration"},{"name":"hardening-linux-endpoint-with-cis-benchmark","description":"'Hardens Linux endpoints using CIS Benchmark recommendations for Ubuntu, RHEL, and CentOS to reduce attack surface,","domain":"cybersecurity","path":"skills/hardening-linux-endpoint-with-cis-benchmark"},{"name":"hardening-windows-endpoint-with-cis-benchmark","description":"'Hardens Windows endpoints using CIS (Center for Internet Security) Benchmark recommendations to reduce attack","domain":"cybersecurity","path":"skills/hardening-windows-endpoint-with-cis-benchmark"},{"name":"hunting-advanced-persistent-threats","description":"'Proactively 
hunts for Advanced Persistent Threat (APT) activity within enterprise environments using hypothesis-driven","domain":"cybersecurity","path":"skills/hunting-advanced-persistent-threats"},{"name":"hunting-credential-stuffing-attacks","description":"'Detects credential stuffing attacks by analyzing authentication logs for login velocity anomalies, ASN diversity,","domain":"cybersecurity","path":"skills/hunting-credential-stuffing-attacks"},{"name":"hunting-for-anomalous-powershell-execution","description":"'Hunt for malicious PowerShell activity by analyzing Script Block Logging (Event 4104), Module Logging (Event","domain":"cybersecurity","path":"skills/hunting-for-anomalous-powershell-execution"},{"name":"hunting-for-beaconing-with-frequency-analysis","description":"Identify command-and-control beaconing patterns in network traffic by applying statistical frequency analysis,","domain":"cybersecurity","path":"skills/hunting-for-beaconing-with-frequency-analysis"},{"name":"hunting-for-cobalt-strike-beacons","description":"Detect Cobalt Strike beacon network activity using default TLS certificate signatures (serial 8BB00EE), JA3/JA3S/JARM","domain":"cybersecurity","path":"skills/hunting-for-cobalt-strike-beacons"},{"name":"hunting-for-command-and-control-beaconing","description":"Detect C2 beaconing patterns in network traffic using frequency analysis, jitter detection, and domain reputation","domain":"cybersecurity","path":"skills/hunting-for-command-and-control-beaconing"},{"name":"hunting-for-data-exfiltration-indicators","description":"Hunt for data exfiltration through network traffic analysis, detecting unusual data flows, DNS tunneling, cloud","domain":"cybersecurity","path":"skills/hunting-for-data-exfiltration-indicators"},{"name":"hunting-for-data-staging-before-exfiltration","description":"Detect data staging activity before exfiltration by monitoring for archive creation with 7-Zip/RAR, unusual 
temp","domain":"cybersecurity","path":"skills/hunting-for-data-staging-before-exfiltration"},{"name":"hunting-for-dcom-lateral-movement","description":"'Hunt for DCOM-based lateral movement by detecting abuse of MMC20.Application, ShellBrowserWindow, and ShellWindows","domain":"cybersecurity","path":"skills/hunting-for-dcom-lateral-movement"},{"name":"hunting-for-dcsync-attacks","description":"Detect DCSync attacks by analyzing Windows Event ID 4662 for unauthorized DS-Replication-Get-Changes requests","domain":"cybersecurity","path":"skills/hunting-for-dcsync-attacks"},{"name":"hunting-for-defense-evasion-via-timestomping","description":"'Detect NTFS timestamp manipulation (MITRE T1070.006) by comparing $STANDARD_INFORMATION vs $FILE_NAME timestamps","domain":"cybersecurity","path":"skills/hunting-for-defense-evasion-via-timestomping"},{"name":"hunting-for-dns-based-persistence","description":"Hunt for DNS-based persistence mechanisms including DNS hijacking, dangling CNAME records, wildcard DNS abuse,","domain":"cybersecurity","path":"skills/hunting-for-dns-based-persistence"},{"name":"hunting-for-dns-tunneling-with-zeek","description":"Detect DNS tunneling and data exfiltration by analyzing Zeek dns.log for high-entropy subdomain queries, excessive","domain":"cybersecurity","path":"skills/hunting-for-dns-tunneling-with-zeek"},{"name":"hunting-for-domain-fronting-c2-traffic","description":"Detect domain fronting C2 traffic by analyzing SNI vs HTTP Host header mismatches in proxy logs and TLS certificate","domain":"cybersecurity","path":"skills/hunting-for-domain-fronting-c2-traffic"},{"name":"hunting-for-lateral-movement-via-wmi","description":"Detect WMI-based lateral movement by analyzing Windows Event ID 4688 process creation and Sysmon Event ID 1 for","domain":"cybersecurity","path":"skills/hunting-for-lateral-movement-via-wmi"},{"name":"hunting-for-living-off-the-cloud-techniques","description":"Hunt for adversary abuse of legitimate cloud services for C2, 
data staging, and exfiltration including abuse","domain":"cybersecurity","path":"skills/hunting-for-living-off-the-cloud-techniques"},{"name":"hunting-for-living-off-the-land-binaries","description":"Proactively hunt for adversary abuse of legitimate system binaries (LOLBins) to execute malicious payloads while","domain":"cybersecurity","path":"skills/hunting-for-living-off-the-land-binaries"},{"name":"hunting-for-lolbins-execution-in-endpoint-logs","description":"Hunt for adversary abuse of Living Off the Land Binaries (LOLBins) by analyzing endpoint process creation logs","domain":"cybersecurity","path":"skills/hunting-for-lolbins-execution-in-endpoint-logs"},{"name":"hunting-for-ntlm-relay-attacks","description":"Detect NTLM relay attacks by analyzing Windows Event 4624 logon type 3 with NTLMSSP authentication, identifying","domain":"cybersecurity","path":"skills/hunting-for-ntlm-relay-attacks"},{"name":"hunting-for-persistence-mechanisms-in-windows","description":"Systematically hunt for adversary persistence mechanisms across Windows endpoints including registry, services,","domain":"cybersecurity","path":"skills/hunting-for-persistence-mechanisms-in-windows"},{"name":"hunting-for-persistence-via-wmi-subscriptions","description":"Hunt for adversary persistence through Windows Management Instrumentation event subscriptions by monitoring WMI","domain":"cybersecurity","path":"skills/hunting-for-persistence-via-wmi-subscriptions"},{"name":"hunting-for-process-injection-techniques","description":"Detect process injection techniques (T1055) including CreateRemoteThread, process hollowing, and DLL injection","domain":"cybersecurity","path":"skills/hunting-for-process-injection-techniques"},{"name":"hunting-for-registry-persistence-mechanisms","description":"Hunt for registry-based persistence mechanisms including Run keys, Winlogon modifications, IFEO injection, 
and","domain":"cybersecurity","path":"skills/hunting-for-registry-persistence-mechanisms"},{"name":"hunting-for-registry-run-key-persistence","description":"Detect MITRE ATT&CK T1547.001 registry Run key persistence by analyzing Sysmon Event ID 13 logs and registry","domain":"cybersecurity","path":"skills/hunting-for-registry-run-key-persistence"},{"name":"hunting-for-scheduled-task-persistence","description":"Hunt for adversary persistence via Windows Scheduled Tasks by analyzing task creation events, suspicious task","domain":"cybersecurity","path":"skills/hunting-for-scheduled-task-persistence"},{"name":"hunting-for-shadow-copy-deletion","description":"Hunt for Volume Shadow Copy deletion activity that indicates ransomware preparation or anti-forensics by monitoring","domain":"cybersecurity","path":"skills/hunting-for-shadow-copy-deletion"},{"name":"hunting-for-spearphishing-indicators","description":"Hunt for spearphishing campaign indicators across email logs, endpoint telemetry, and network data to detect","domain":"cybersecurity","path":"skills/hunting-for-spearphishing-indicators"},{"name":"hunting-for-startup-folder-persistence","description":"Detect T1547.001 startup folder persistence by monitoring Windows startup directories for suspicious file creation,","domain":"cybersecurity","path":"skills/hunting-for-startup-folder-persistence"},{"name":"hunting-for-supply-chain-compromise","description":"Hunt for supply chain compromise indicators including trojanized software updates, compromised dependencies,","domain":"cybersecurity","path":"skills/hunting-for-supply-chain-compromise"},{"name":"hunting-for-suspicious-scheduled-tasks","description":"Hunt for adversary persistence and execution via Windows scheduled tasks by analyzing task creation events, suspicious","domain":"cybersecurity","path":"skills/hunting-for-suspicious-scheduled-tasks"},{"name":"hunting-for-t1098-account-manipulation","description":"Hunt for MITRE ATT&CK T1098 account manipulation 
including shadow admin creation, SID history injection, group","domain":"cybersecurity","path":"skills/hunting-for-t1098-account-manipulation"},{"name":"hunting-for-unusual-network-connections","description":"Hunt for unusual network connections by analyzing outbound traffic patterns, rare destinations, non-standard","domain":"cybersecurity","path":"skills/hunting-for-unusual-network-connections"},{"name":"hunting-for-unusual-service-installations","description":"Detect suspicious Windows service installations (MITRE ATT&CK T1543.003) by parsing System event logs for Event","domain":"cybersecurity","path":"skills/hunting-for-unusual-service-installations"},{"name":"hunting-for-webshell-activity","description":"Hunt for web shell deployments on internet-facing servers by analyzing file creation in web directories, suspicious","domain":"cybersecurity","path":"skills/hunting-for-webshell-activity"},{"name":"implementing-aes-encryption-for-data-at-rest","description":"AES (Advanced Encryption Standard) is a symmetric block cipher standardized by NIST (FIPS 197) used to protect","domain":"cybersecurity","path":"skills/implementing-aes-encryption-for-data-at-rest"},{"name":"implementing-alert-fatigue-reduction","description":"'Implements strategies to reduce SOC alert fatigue by tuning detection rules, consolidating duplicate alerts,","domain":"cybersecurity","path":"skills/implementing-alert-fatigue-reduction"},{"name":"implementing-anti-phishing-training-program","description":"Security awareness training is the human layer of phishing defense. An effective anti-phishing training program","domain":"cybersecurity","path":"skills/implementing-anti-phishing-training-program"},{"name":"implementing-anti-ransomware-group-policy","description":"'Configures Windows Group Policy Objects (GPO) to prevent ransomware execution and limit its spread. 
Implements","domain":"cybersecurity","path":"skills/implementing-anti-ransomware-group-policy"},{"name":"implementing-api-abuse-detection-with-rate-limiting","description":"Implement API abuse detection using token bucket, sliding window, and adaptive rate limiting algorithms to prevent","domain":"cybersecurity","path":"skills/implementing-api-abuse-detection-with-rate-limiting"},{"name":"implementing-api-gateway-security-controls","description":"'Implements security controls at the API gateway layer including authentication enforcement, rate limiting, request","domain":"cybersecurity","path":"skills/implementing-api-gateway-security-controls"},{"name":"implementing-api-key-security-controls","description":"'Implements secure API key generation, storage, rotation, and revocation controls to protect API authentication","domain":"cybersecurity","path":"skills/implementing-api-key-security-controls"},{"name":"implementing-api-rate-limiting-and-throttling","description":"'Implements API rate limiting and throttling controls using token bucket, sliding window, and fixed window algorithms","domain":"cybersecurity","path":"skills/implementing-api-rate-limiting-and-throttling"},{"name":"implementing-api-schema-validation-security","description":"Implement API schema validation using OpenAPI specifications and JSON Schema to enforce input/output contracts","domain":"cybersecurity","path":"skills/implementing-api-schema-validation-security"},{"name":"implementing-api-security-posture-management","description":"Implement API Security Posture Management to continuously discover, classify, and score APIs based on risk while","domain":"cybersecurity","path":"skills/implementing-api-security-posture-management"},{"name":"implementing-api-security-testing-with-42crunch","description":"Implement comprehensive API security testing using the 42Crunch platform to perform static audit and 
dynamic","domain":"cybersecurity","path":"skills/implementing-api-security-testing-with-42crunch"},{"name":"implementing-api-threat-protection-with-apigee","description":"Implement API threat protection using Google Apigee policies including JSON/XML threat protection, OAuth 2.0,","domain":"cybersecurity","path":"skills/implementing-api-threat-protection-with-apigee"},{"name":"implementing-application-whitelisting-with-applocker","description":"'Implements application whitelisting using Windows AppLocker to restrict unauthorized software execution on endpoints,","domain":"cybersecurity","path":"skills/implementing-application-whitelisting-with-applocker"},{"name":"implementing-aqua-security-for-container-scanning","description":"Deploy Aqua Security's Trivy scanner to detect vulnerabilities, misconfigurations, secrets, and license issues","domain":"cybersecurity","path":"skills/implementing-aqua-security-for-container-scanning"},{"name":"implementing-attack-path-analysis-with-xm-cyber","description":"Deploy XM Cyber's continuous exposure management platform to map attack paths, identify choke points, and prioritize","domain":"cybersecurity","path":"skills/implementing-attack-path-analysis-with-xm-cyber"},{"name":"implementing-attack-surface-management","description":"'Implements external attack surface management (EASM) using Shodan, Censys, and ProjectDiscovery tools (subfinder,","domain":"cybersecurity","path":"skills/implementing-attack-surface-management"},{"name":"implementing-aws-config-rules-for-compliance","description":"'Implementing AWS Config rules for continuous compliance monitoring of AWS resources, deploying managed and custom","domain":"cybersecurity","path":"skills/implementing-aws-config-rules-for-compliance"},{"name":"implementing-aws-iam-permission-boundaries","description":"Configure IAM permission boundaries in AWS to delegate role creation to developers while enforcing maximum 
privilege","domain":"cybersecurity","path":"skills/implementing-aws-iam-permission-boundaries"},{"name":"implementing-aws-macie-for-data-classification","description":"Implement Amazon Macie to automatically discover, classify, and protect sensitive data in S3 buckets using machine","domain":"cybersecurity","path":"skills/implementing-aws-macie-for-data-classification"},{"name":"implementing-aws-nitro-enclave-security","description":"'Implements AWS Nitro Enclave-based confidential computing environments with cryptographic attestation, KMS policy","domain":"cybersecurity","path":"skills/implementing-aws-nitro-enclave-security"},{"name":"implementing-aws-security-hub","description":"'This skill covers deploying AWS Security Hub as a centralized cloud security posture management platform that","domain":"cybersecurity","path":"skills/implementing-aws-security-hub"},{"name":"implementing-aws-security-hub-compliance","description":"'Implementing AWS Security Hub to aggregate security findings across AWS accounts, enable compliance standards","domain":"cybersecurity","path":"skills/implementing-aws-security-hub-compliance"},{"name":"implementing-azure-ad-privileged-identity-management","description":"Configure Microsoft Entra Privileged Identity Management to enforce just-in-time role activation, approval workflows,","domain":"cybersecurity","path":"skills/implementing-azure-ad-privileged-identity-management"},{"name":"implementing-azure-defender-for-cloud","description":"'Implementing Microsoft Defender for Cloud to enable cloud security posture management, workload protection across","domain":"cybersecurity","path":"skills/implementing-azure-defender-for-cloud"},{"name":"implementing-beyondcorp-zero-trust-access-model","description":"'Implementing Google''s BeyondCorp zero trust access model to eliminate implicit trust from the network 
perimeter,","domain":"cybersecurity","path":"skills/implementing-beyondcorp-zero-trust-access-model"},{"name":"implementing-bgp-security-with-rpki","description":"Implement BGP route origin validation using RPKI with Route Origin Authorizations, RPKI-to-Router protocol, and","domain":"cybersecurity","path":"skills/implementing-bgp-security-with-rpki"},{"name":"implementing-browser-isolation-for-zero-trust","description":"'Deploys remote browser isolation (RBI) as a core component of a Zero Trust architecture. Implements isolation","domain":"cybersecurity","path":"skills/implementing-browser-isolation-for-zero-trust"},{"name":"implementing-canary-tokens-for-network-intrusion","description":"'Deploys DNS, HTTP, and AWS API key canary tokens across network infrastructure to detect unauthorized access","domain":"cybersecurity","path":"skills/implementing-canary-tokens-for-network-intrusion"},{"name":"implementing-cisa-zero-trust-maturity-model","description":"Implement the CISA Zero Trust Maturity Model v2.0 across the five pillars of identity, devices, networks, applications,","domain":"cybersecurity","path":"skills/implementing-cisa-zero-trust-maturity-model"},{"name":"implementing-cloud-dlp-for-data-protection","description":"'Implementing Cloud Data Loss Prevention (DLP) using Amazon Macie, Azure Information Protection, and Google Cloud","domain":"cybersecurity","path":"skills/implementing-cloud-dlp-for-data-protection"},{"name":"implementing-cloud-security-posture-management","description":"'Implementing Cloud Security Posture Management (CSPM) to continuously monitor multi-cloud environments for misconfigurations,","domain":"cybersecurity","path":"skills/implementing-cloud-security-posture-management"},{"name":"implementing-cloud-trail-log-analysis","description":"'Implementing AWS CloudTrail log analysis for security monitoring, threat detection, and forensic 
investigation","domain":"cybersecurity","path":"skills/implementing-cloud-trail-log-analysis"},{"name":"implementing-cloud-vulnerability-posture-management","description":"Implement Cloud Security Posture Management using AWS Security Hub, Azure Defender for Cloud, and open-source","domain":"cybersecurity","path":"skills/implementing-cloud-vulnerability-posture-management"},{"name":"implementing-cloud-waf-rules","description":"'This skill covers deploying and tuning Web Application Firewall rules on AWS WAF, Azure WAF, and Cloudflare","domain":"cybersecurity","path":"skills/implementing-cloud-waf-rules"},{"name":"implementing-cloud-workload-protection","description":"'Implements cloud workload protection using boto3 and google-cloud APIs for runtime security monitoring, process","domain":"cybersecurity","path":"skills/implementing-cloud-workload-protection"},{"name":"implementing-code-signing-for-artifacts","description":"'This skill covers implementing code signing for build artifacts to ensure integrity and authenticity throughout","domain":"cybersecurity","path":"skills/implementing-code-signing-for-artifacts"},{"name":"implementing-conditional-access-policies-azure-ad","description":"Configure Microsoft Entra ID (Azure AD) Conditional Access policies for zero trust access control. 
Covers signal-based","domain":"cybersecurity","path":"skills/implementing-conditional-access-policies-azure-ad"},{"name":"implementing-conduit-security-for-ot-remote-access","description":"'Implement secure conduit architecture for OT remote access following IEC 62443 zones and conduits model, deploying","domain":"cybersecurity","path":"skills/implementing-conduit-security-for-ot-remote-access"},{"name":"implementing-container-image-minimal-base-with-distroless","description":"Reduce container attack surface by building application images on Google distroless base images that contain","domain":"cybersecurity","path":"skills/implementing-container-image-minimal-base-with-distroless"},{"name":"implementing-container-network-policies-with-calico","description":"Enforce Kubernetes network segmentation using Calico CNI network policies and global network policies to control","domain":"cybersecurity","path":"skills/implementing-container-network-policies-with-calico"},{"name":"implementing-continuous-security-validation-with-bas","description":"Deploy Breach and Attack Simulation tools to continuously validate security control effectiveness by safely emulating","domain":"cybersecurity","path":"skills/implementing-continuous-security-validation-with-bas"},{"name":"implementing-data-loss-prevention-with-microsoft-purview","description":"'Implements data loss prevention policies using Microsoft Purview to protect sensitive information across Exchange","domain":"cybersecurity","path":"skills/implementing-data-loss-prevention-with-microsoft-purview"},{"name":"implementing-ddos-mitigation-with-cloudflare","description":"Configure Cloudflare DDoS protection with managed rulesets, rate limiting, WAF rules, Bot Management, and origin","domain":"cybersecurity","path":"skills/implementing-ddos-mitigation-with-cloudflare"},{"name":"implementing-deception-based-detection-with-canarytoken","description":"Deploy and monitor Canary Tokens via the Thinkst Canary API for deception-based 
breach detection using web bug","domain":"cybersecurity","path":"skills/implementing-deception-based-detection-with-canarytoken"},{"name":"implementing-delinea-secret-server-for-pam","description":"'Implements Delinea Secret Server for privileged access management (PAM) including secret vault configuration,","domain":"cybersecurity","path":"skills/implementing-delinea-secret-server-for-pam"},{"name":"implementing-device-posture-assessment-in-zero-trust","description":"'Implementing device posture assessment as a zero trust access control by integrating endpoint health signals","domain":"cybersecurity","path":"skills/implementing-device-posture-assessment-in-zero-trust"},{"name":"implementing-devsecops-security-scanning","description":"'Integrates Static Application Security Testing (SAST), Dynamic Application Security Testing (DAST), and Software","domain":"cybersecurity","path":"skills/implementing-devsecops-security-scanning"},{"name":"implementing-diamond-model-analysis","description":"The Diamond Model of Intrusion Analysis provides a structured framework for analyzing cyber intrusions by examining","domain":"cybersecurity","path":"skills/implementing-diamond-model-analysis"},{"name":"implementing-digital-signatures-with-ed25519","description":"Ed25519 is a high-performance digital signature algorithm using the Edwards curve Curve25519. It provides 128-bit","domain":"cybersecurity","path":"skills/implementing-digital-signatures-with-ed25519"},{"name":"implementing-disk-encryption-with-bitlocker","description":"'Implements full disk encryption using Microsoft BitLocker on Windows endpoints to protect data at rest from","domain":"cybersecurity","path":"skills/implementing-disk-encryption-with-bitlocker"},{"name":"implementing-dmarc-dkim-spf-email-security","description":"SPF, DKIM, and DMARC form the three pillars of email authentication. 
Together they prevent domain spoofing, validate","domain":"cybersecurity","path":"skills/implementing-dmarc-dkim-spf-email-security"},{"name":"implementing-dragos-platform-for-ot-monitoring","description":"'Deploy and configure the Dragos Platform for OT network monitoring, leveraging its 600+ industrial protocol","domain":"cybersecurity","path":"skills/implementing-dragos-platform-for-ot-monitoring"},{"name":"implementing-ebpf-security-monitoring","description":"'Implements eBPF-based security monitoring using Cilium Tetragon for real-time process execution tracking, network","domain":"cybersecurity","path":"skills/implementing-ebpf-security-monitoring"},{"name":"implementing-email-sandboxing-with-proofpoint","description":"Email sandboxing detonates suspicious attachments and URLs in isolated environments to detect zero-day malware","domain":"cybersecurity","path":"skills/implementing-email-sandboxing-with-proofpoint"},{"name":"implementing-end-to-end-encryption-for-messaging","description":"End-to-end encryption (E2EE) ensures that only the communicating parties can read messages, with no intermediary","domain":"cybersecurity","path":"skills/implementing-end-to-end-encryption-for-messaging"},{"name":"implementing-endpoint-detection-with-wazuh","description":"Deploy and configure Wazuh SIEM/XDR for endpoint detection including agent management, custom decoder and rule","domain":"cybersecurity","path":"skills/implementing-endpoint-detection-with-wazuh"},{"name":"implementing-endpoint-dlp-controls","description":"'Implements endpoint Data Loss Prevention (DLP) controls to detect and prevent sensitive data exfiltration through","domain":"cybersecurity","path":"skills/implementing-endpoint-dlp-controls"},{"name":"implementing-envelope-encryption-with-aws-kms","description":"Envelope encryption is a strategy where data is encrypted with a data encryption key (DEK), and the DEK 
itself","domain":"cybersecurity","path":"skills/implementing-envelope-encryption-with-aws-kms"},{"name":"implementing-epss-score-for-vulnerability-prioritization","description":"Integrate FIRST's Exploit Prediction Scoring System (EPSS) API to prioritize vulnerability remediation based","domain":"cybersecurity","path":"skills/implementing-epss-score-for-vulnerability-prioritization"},{"name":"implementing-file-integrity-monitoring-with-aide","description":"Configure AIDE (Advanced Intrusion Detection Environment) for file integrity monitoring including baseline creation,","domain":"cybersecurity","path":"skills/implementing-file-integrity-monitoring-with-aide"},{"name":"implementing-fuzz-testing-in-cicd-with-aflplusplus","description":"Integrate AFL++ coverage-guided fuzz testing into CI/CD pipelines to discover memory corruption, input handling,","domain":"cybersecurity","path":"skills/implementing-fuzz-testing-in-cicd-with-aflplusplus"},{"name":"implementing-gcp-binary-authorization","description":"Implement GCP Binary Authorization to enforce deploy-time security controls that ensure only trusted, attested","domain":"cybersecurity","path":"skills/implementing-gcp-binary-authorization"},{"name":"implementing-gcp-organization-policy-constraints","description":"Implement GCP Organization Policy constraints to enforce security guardrails across the entire resource hierarchy,","domain":"cybersecurity","path":"skills/implementing-gcp-organization-policy-constraints"},{"name":"implementing-gcp-vpc-firewall-rules","description":"'Implementing and auditing GCP VPC firewall rules to enforce network segmentation, restrict ingress and egress","domain":"cybersecurity","path":"skills/implementing-gcp-vpc-firewall-rules"},{"name":"implementing-gdpr-data-protection-controls","description":"The General Data Protection Regulation (EU) 2016/679 (GDPR) is the EU's comprehensive data protection law 
governing","domain":"cybersecurity","path":"skills/implementing-gdpr-data-protection-controls"},{"name":"implementing-gdpr-data-subject-access-request","description":"'Automates GDPR Data Subject Access Request (DSAR) workflows including identity verification, PII discovery across","domain":"cybersecurity","path":"skills/implementing-gdpr-data-subject-access-request"},{"name":"implementing-github-advanced-security-for-code-scanning","description":"Configure GitHub Advanced Security with CodeQL to perform automated static analysis and vulnerability detection","domain":"cybersecurity","path":"skills/implementing-github-advanced-security-for-code-scanning"},{"name":"implementing-google-workspace-admin-security","description":"'Implements comprehensive Google Workspace security hardening including admin console configuration, phishing-resistant","domain":"cybersecurity","path":"skills/implementing-google-workspace-admin-security"},{"name":"implementing-google-workspace-phishing-protection","description":"Configure Google Workspace advanced phishing and malware protection settings including pre-delivery scanning,","domain":"cybersecurity","path":"skills/implementing-google-workspace-phishing-protection"},{"name":"implementing-google-workspace-sso-configuration","description":"Configure SAML 2.0 single sign-on for Google Workspace with a third-party identity provider, enabling centralized","domain":"cybersecurity","path":"skills/implementing-google-workspace-sso-configuration"},{"name":"implementing-hardware-security-key-authentication","description":"'Implements FIDO2/WebAuthn hardware security key authentication including registration ceremonies, authentication","domain":"cybersecurity","path":"skills/implementing-hardware-security-key-authentication"},{"name":"implementing-hashicorp-vault-dynamic-secrets","description":"'Implements HashiCorp Vault dynamic secrets engines for database credentials, AWS IAM keys, and PKI 
certificates","domain":"cybersecurity","path":"skills/implementing-hashicorp-vault-dynamic-secrets"},{"name":"implementing-honeypot-for-ransomware-detection","description":"'Deploys canary files, honeypot shares, and decoy systems to detect ransomware activity at the earliest possible","domain":"cybersecurity","path":"skills/implementing-honeypot-for-ransomware-detection"},{"name":"implementing-honeytokens-for-breach-detection","description":"'Deploys canary tokens and honeytokens (fake AWS credentials, DNS canaries, document beacons, database records)","domain":"cybersecurity","path":"skills/implementing-honeytokens-for-breach-detection"},{"name":"implementing-ics-firewall-with-tofino","description":"'Deploy and configure Tofino industrial firewalls from Belden/Hirschmann to protect SCADA systems and PLCs using","domain":"cybersecurity","path":"skills/implementing-ics-firewall-with-tofino"},{"name":"implementing-identity-governance-with-sailpoint","description":"Deploy SailPoint IdentityNow or IdentityIQ for identity governance and administration. 
Covers identity lifecycle","domain":"cybersecurity","path":"skills/implementing-identity-governance-with-sailpoint"},{"name":"implementing-identity-verification-for-zero-trust","description":"Implement continuous identity verification for zero trust using phishing-resistant MFA (FIDO2/WebAuthn), risk-based","domain":"cybersecurity","path":"skills/implementing-identity-verification-for-zero-trust"},{"name":"implementing-iec-62443-security-zones","description":"'This skill covers designing and implementing security zones and conduits for industrial automation and control","domain":"cybersecurity","path":"skills/implementing-iec-62443-security-zones"},{"name":"implementing-image-provenance-verification-with-cosign","description":"Sign and verify container image provenance using Sigstore Cosign with keyless OIDC-based signing, attestations,","domain":"cybersecurity","path":"skills/implementing-image-provenance-verification-with-cosign"},{"name":"implementing-immutable-backup-with-restic","description":"'Implements immutable backup strategy using restic with S3-compatible storage and object lock for ransomware-resistant","domain":"cybersecurity","path":"skills/implementing-immutable-backup-with-restic"},{"name":"implementing-infrastructure-as-code-security-scanning","description":"'This skill covers implementing automated security scanning for Infrastructure as Code (IaC) templates using","domain":"cybersecurity","path":"skills/implementing-infrastructure-as-code-security-scanning"},{"name":"implementing-iso-27001-information-security-management","description":"ISO/IEC 27001:2022 is the international standard for establishing, implementing, maintaining, and continually improving an Information Security Management System (ISMS). 
This skill covers the complete","domain":"cybersecurity","path":"skills/implementing-iso-27001-information-security-management"},{"name":"implementing-just-in-time-access-provisioning","description":"Implement Just-In-Time (JIT) access provisioning to eliminate standing privileges by granting temporary, time-bound","domain":"cybersecurity","path":"skills/implementing-just-in-time-access-provisioning"},{"name":"implementing-jwt-signing-and-verification","description":"JSON Web Tokens (JWT) defined in RFC 7519 are compact, URL-safe tokens used for authentication and authorization","domain":"cybersecurity","path":"skills/implementing-jwt-signing-and-verification"},{"name":"implementing-kubernetes-network-policy-with-calico","description":"Implement Kubernetes network segmentation using Calico NetworkPolicy and GlobalNetworkPolicy for zero-trust pod-to-pod","domain":"cybersecurity","path":"skills/implementing-kubernetes-network-policy-with-calico"},{"name":"implementing-kubernetes-pod-security-standards","description":"Pod Security Standards (PSS) define three levels of security policies -- Privileged, Baseline, and Restricted","domain":"cybersecurity","path":"skills/implementing-kubernetes-pod-security-standards"},{"name":"implementing-llm-guardrails-for-security","description":"'Implements input and output validation guardrails for LLM-powered applications to prevent prompt injection,","domain":"cybersecurity","path":"skills/implementing-llm-guardrails-for-security"},{"name":"implementing-log-forwarding-with-fluentd","description":"Configure Fluentd and Fluent Bit for centralized log aggregation, routing, filtering, and enrichment across distributed","domain":"cybersecurity","path":"skills/implementing-log-forwarding-with-fluentd"},{"name":"implementing-log-integrity-with-blockchain","description":"Build an append-only log integrity chain using SHA-256 hash chaining for tamper detection. 
Each log entry is","domain":"cybersecurity","path":"skills/implementing-log-integrity-with-blockchain"},{"name":"implementing-memory-protection-with-dep-aslr","description":"'Implements memory protection mechanisms including DEP (Data Execution Prevention), ASLR (Address Space Layout","domain":"cybersecurity","path":"skills/implementing-memory-protection-with-dep-aslr"},{"name":"implementing-microsegmentation-with-guardicore","description":"'Implementing microsegmentation using Akamai Guardicore Segmentation to map application dependencies, create","domain":"cybersecurity","path":"skills/implementing-microsegmentation-with-guardicore"},{"name":"implementing-mimecast-targeted-attack-protection","description":"Deploy Mimecast Targeted Threat Protection including URL Protect, Attachment Protect, Impersonation Protect,","domain":"cybersecurity","path":"skills/implementing-mimecast-targeted-attack-protection"},{"name":"implementing-mitre-attack-coverage-mapping","description":"Implement MITRE ATT&CK coverage mapping to identify detection gaps, prioritize rule development, and measure","domain":"cybersecurity","path":"skills/implementing-mitre-attack-coverage-mapping"},{"name":"implementing-mobile-application-management","description":"'Implements Mobile Application Management (MAM) policies to protect enterprise data on managed and unmanaged","domain":"cybersecurity","path":"skills/implementing-mobile-application-management"},{"name":"implementing-mtls-for-zero-trust-services","description":"'Configures mutual TLS (mTLS) authentication between microservices using Python cryptography library for certificate","domain":"cybersecurity","path":"skills/implementing-mtls-for-zero-trust-services"},{"name":"implementing-nerc-cip-compliance-controls","description":"'This skill covers implementing North American Electric Reliability Corporation Critical Infrastructure 
Protection","domain":"cybersecurity","path":"skills/implementing-nerc-cip-compliance-controls"},{"name":"implementing-network-access-control","description":"'Implements 802.1X port-based network access control using RADIUS authentication, PacketFence NAC, and switch","domain":"cybersecurity","path":"skills/implementing-network-access-control"},{"name":"implementing-network-access-control-with-cisco-ise","description":"Deploy Cisco Identity Services Engine for 802.1X wired and wireless authentication, MAC Authentication Bypass,","domain":"cybersecurity","path":"skills/implementing-network-access-control-with-cisco-ise"},{"name":"implementing-network-deception-with-honeypots","description":"Deploy and manage network honeypots using OpenCanary, T-Pot, or Cowrie to detect unauthorized access, lateral","domain":"cybersecurity","path":"skills/implementing-network-deception-with-honeypots"},{"name":"implementing-network-intrusion-prevention-with-suricata","description":"Deploy and configure Suricata as a network intrusion prevention system with custom rules, Emerging Threats rulesets,","domain":"cybersecurity","path":"skills/implementing-network-intrusion-prevention-with-suricata"},{"name":"implementing-network-policies-for-kubernetes","description":"Kubernetes NetworkPolicies provide pod-level network segmentation by defining ingress and egress rules that control","domain":"cybersecurity","path":"skills/implementing-network-policies-for-kubernetes"},{"name":"implementing-network-segmentation-for-ot","description":"'This skill covers implementing network segmentation in Operational Technology environments using VLANs, industrial","domain":"cybersecurity","path":"skills/implementing-network-segmentation-for-ot"},{"name":"implementing-network-segmentation-with-firewall-zones","description":"Design and implement network segmentation using firewall security zones, VLANs, ACLs, and microsegmentation 
policies","domain":"cybersecurity","path":"skills/implementing-network-segmentation-with-firewall-zones"},{"name":"implementing-network-traffic-analysis-with-arkime","description":"Deploy and query Arkime (formerly Moloch) for full packet capture network traffic analysis. Uses the Arkime API","domain":"cybersecurity","path":"skills/implementing-network-traffic-analysis-with-arkime"},{"name":"implementing-network-traffic-baselining","description":"Build network traffic baselines from NetFlow/IPFIX data using Python pandas for statistical analysis, z-score","domain":"cybersecurity","path":"skills/implementing-network-traffic-baselining"},{"name":"implementing-next-generation-firewall-with-palo-alto","description":"Configure and deploy Palo Alto Networks next-generation firewalls with App-ID, User-ID, zone-based policies,","domain":"cybersecurity","path":"skills/implementing-next-generation-firewall-with-palo-alto"},{"name":"implementing-opa-gatekeeper-for-policy-enforcement","description":"Enforce Kubernetes admission policies using OPA Gatekeeper with ConstraintTemplates, Rego rules, and the Gatekeeper","domain":"cybersecurity","path":"skills/implementing-opa-gatekeeper-for-policy-enforcement"},{"name":"implementing-ot-incident-response-playbook","description":"'Develop and implement OT-specific incident response playbooks aligned with SANS PICERL framework, IEC 62443,","domain":"cybersecurity","path":"skills/implementing-ot-incident-response-playbook"},{"name":"implementing-ot-network-traffic-analysis-with-nozomi","description":"'Deploy Nozomi Networks Guardian sensors for passive OT network traffic analysis to achieve comprehensive asset","domain":"cybersecurity","path":"skills/implementing-ot-network-traffic-analysis-with-nozomi"},{"name":"implementing-pam-for-database-access","description":"Deploy privileged access management for database systems including Oracle, SQL Server, PostgreSQL, and 
MySQL.","domain":"cybersecurity","path":"skills/implementing-pam-for-database-access"},{"name":"implementing-passwordless-auth-with-microsoft-entra","description":"'Implements passwordless authentication using Microsoft Entra ID with FIDO2 security keys, Windows Hello for","domain":"cybersecurity","path":"skills/implementing-passwordless-auth-with-microsoft-entra"},{"name":"implementing-passwordless-authentication-with-fido2","description":"Deploy FIDO2/WebAuthn passwordless authentication using security keys and platform authenticators. Covers WebAuthn","domain":"cybersecurity","path":"skills/implementing-passwordless-authentication-with-fido2"},{"name":"implementing-patch-management-for-ot-systems","description":"'This skill covers implementing a structured patch management program for OT/ICS environments where traditional","domain":"cybersecurity","path":"skills/implementing-patch-management-for-ot-systems"},{"name":"implementing-patch-management-workflow","description":"Patch management is the systematic process of identifying, testing, deploying, and verifying software updates","domain":"cybersecurity","path":"skills/implementing-patch-management-workflow"},{"name":"implementing-pci-dss-compliance-controls","description":"PCI DSS 4.0.1 establishes 12 requirements across 6 control objectives for organizations that store, process, or transmit cardholder data. 
With PCI DSS 3.2.1 retiring April 2024 and 51 new requirements","domain":"cybersecurity","path":"skills/implementing-pci-dss-compliance-controls"},{"name":"implementing-pod-security-admission-controller","description":"Implement Kubernetes Pod Security Admission to enforce baseline and restricted security profiles at namespace","domain":"cybersecurity","path":"skills/implementing-pod-security-admission-controller"},{"name":"implementing-policy-as-code-with-open-policy-agent","description":"'This skill covers implementing Open Policy Agent (OPA) and Gatekeeper for policy-as-code enforcement in Kubernetes","domain":"cybersecurity","path":"skills/implementing-policy-as-code-with-open-policy-agent"},{"name":"implementing-privileged-access-management-with-cyberark","description":"Deploy CyberArk Privileged Access Management to discover, vault, rotate, and monitor privileged credentials across","domain":"cybersecurity","path":"skills/implementing-privileged-access-management-with-cyberark"},{"name":"implementing-privileged-access-workstation","description":"Design and implement Privileged Access Workstations (PAWs) with device hardening, just-in-time access, and integration","domain":"cybersecurity","path":"skills/implementing-privileged-access-workstation"},{"name":"implementing-privileged-session-monitoring","description":"'Implements privileged session monitoring and recording using Privileged Access Management (PAM) solutions, focusing","domain":"cybersecurity","path":"skills/implementing-privileged-session-monitoring"},{"name":"implementing-proofpoint-email-security-gateway","description":"Deploy and configure Proofpoint Email Protection as a secure email gateway to detect and block phishing, malware,","domain":"cybersecurity","path":"skills/implementing-proofpoint-email-security-gateway"},{"name":"implementing-purdue-model-network-segmentation","description":"'Implement network segmentation based on the Purdue Enterprise Reference Architecture (PERA) model to 
separate","domain":"cybersecurity","path":"skills/implementing-purdue-model-network-segmentation"},{"name":"implementing-ransomware-backup-strategy","description":"'Designs and implements a ransomware-resilient backup strategy following the 3-2-1-1-0 methodology (3 copies,","domain":"cybersecurity","path":"skills/implementing-ransomware-backup-strategy"},{"name":"implementing-ransomware-kill-switch-detection","description":"'Detects and exploits ransomware kill switch mechanisms including mutex-based execution guards, domain-based","domain":"cybersecurity","path":"skills/implementing-ransomware-kill-switch-detection"},{"name":"implementing-rapid7-insightvm-for-scanning","description":"Deploy and configure Rapid7 InsightVM Security Console and Scan Engines for authenticated and unauthenticated","domain":"cybersecurity","path":"skills/implementing-rapid7-insightvm-for-scanning"},{"name":"implementing-rbac-hardening-for-kubernetes","description":"Harden Kubernetes Role-Based Access Control by implementing least-privilege policies, auditing role bindings,","domain":"cybersecurity","path":"skills/implementing-rbac-hardening-for-kubernetes"},{"name":"implementing-rsa-key-pair-management","description":"RSA (Rivest-Shamir-Adleman) is the most widely deployed asymmetric cryptographic algorithm, used for digital","domain":"cybersecurity","path":"skills/implementing-rsa-key-pair-management"},{"name":"implementing-runtime-application-self-protection","description":"Deploy Runtime Application Self-Protection (RASP) agents to detect and block attacks from within application","domain":"cybersecurity","path":"skills/implementing-runtime-application-self-protection"},{"name":"implementing-runtime-security-with-tetragon","description":"Implement eBPF-based runtime security observability and enforcement in Kubernetes clusters using Cilium 
Tetragon","domain":"cybersecurity","path":"skills/implementing-runtime-security-with-tetragon"},{"name":"implementing-saml-sso-with-okta","description":"Implement SAML 2.0 Single Sign-On (SSO) using Okta as the Identity Provider (IdP). This skill covers end-to-end","domain":"cybersecurity","path":"skills/implementing-saml-sso-with-okta"},{"name":"implementing-scim-provisioning-with-okta","description":"Implement automated user provisioning and deprovisioning using SCIM 2.0 protocol with Okta as the identity provider.","domain":"cybersecurity","path":"skills/implementing-scim-provisioning-with-okta"},{"name":"implementing-secret-scanning-with-gitleaks","description":"'This skill covers implementing Gitleaks for detecting and preventing hardcoded secrets in git repositories.","domain":"cybersecurity","path":"skills/implementing-secret-scanning-with-gitleaks"},{"name":"implementing-secrets-management-with-vault","description":"'This skill covers deploying HashiCorp Vault for centralized secrets management across cloud environments, including","domain":"cybersecurity","path":"skills/implementing-secrets-management-with-vault"},{"name":"implementing-secrets-scanning-in-ci-cd","description":"Integrate gitleaks and trufflehog into CI/CD pipelines to detect leaked secrets before deployment","domain":"cybersecurity","path":"skills/implementing-secrets-scanning-in-ci-cd"},{"name":"implementing-security-chaos-engineering","description":"'Implements security chaos engineering experiments that deliberately disable or degrade security controls to","domain":"cybersecurity","path":"skills/implementing-security-chaos-engineering"},{"name":"implementing-security-information-sharing-with-stix2","description":"'Create, validate, and share STIX 2.1 threat intelligence objects using the stix2 Python library. 
Covers indicators,","domain":"cybersecurity","path":"skills/implementing-security-information-sharing-with-stix2"},{"name":"implementing-security-monitoring-with-datadog","description":"'Implements security monitoring using Datadog Cloud SIEM, Cloud Security Management (CSM), and Workload Protection","domain":"cybersecurity","path":"skills/implementing-security-monitoring-with-datadog"},{"name":"implementing-semgrep-for-custom-sast-rules","description":"Write custom Semgrep SAST rules in YAML to detect application-specific vulnerabilities, enforce coding standards,","domain":"cybersecurity","path":"skills/implementing-semgrep-for-custom-sast-rules"},{"name":"implementing-siem-correlation-rules-for-apt","description":"Write multi-event correlation rules that detect APT lateral movement by chaining Windows authentication events,","domain":"cybersecurity","path":"skills/implementing-siem-correlation-rules-for-apt"},{"name":"implementing-siem-use-case-tuning","description":"Tune SIEM detection rules to reduce false positives by analyzing alert volumes, creating whitelists, adjusting","domain":"cybersecurity","path":"skills/implementing-siem-use-case-tuning"},{"name":"implementing-siem-use-cases-for-detection","description":"'Implements SIEM detection use cases by designing correlation rules, threshold alerts, and behavioral analytics","domain":"cybersecurity","path":"skills/implementing-siem-use-cases-for-detection"},{"name":"implementing-sigstore-for-software-signing","description":"'Implements Sigstore-based software signing and verification using Cosign keyless signing, Rekor transparency","domain":"cybersecurity","path":"skills/implementing-sigstore-for-software-signing"},{"name":"implementing-soar-automation-with-phantom","description":"'Implements Security Orchestration, Automation, and Response (SOAR) workflows using Splunk SOAR (formerly 
Phantom)","domain":"cybersecurity","path":"skills/implementing-soar-automation-with-phantom"},{"name":"implementing-soar-playbook-for-phishing","description":"Automate phishing incident response using Splunk SOAR REST API to create containers, add artifacts, and trigger","domain":"cybersecurity","path":"skills/implementing-soar-playbook-for-phishing"},{"name":"implementing-soar-playbook-with-palo-alto-xsoar","description":"Implement automated incident response playbooks in Cortex XSOAR to orchestrate security workflows across SOC","domain":"cybersecurity","path":"skills/implementing-soar-playbook-with-palo-alto-xsoar"},{"name":"implementing-stix-taxii-feed-integration","description":"STIX (Structured Threat Information eXpression) and TAXII (Trusted Automated eXchange of Intelligence Information)","domain":"cybersecurity","path":"skills/implementing-stix-taxii-feed-integration"},{"name":"implementing-supply-chain-security-with-in-toto","description":"Implement software supply chain integrity verification for container builds using the in-toto framework to create","domain":"cybersecurity","path":"skills/implementing-supply-chain-security-with-in-toto"},{"name":"implementing-syslog-centralization-with-rsyslog","description":"Configure rsyslog for centralized log collection with TLS encryption, custom templates, and log rotation. 
Generates","domain":"cybersecurity","path":"skills/implementing-syslog-centralization-with-rsyslog"},{"name":"implementing-taxii-server-with-opentaxii","description":"Deploy and configure an OpenTAXII server to share and consume STIX-formatted cyber threat intelligence using","domain":"cybersecurity","path":"skills/implementing-taxii-server-with-opentaxii"},{"name":"implementing-threat-intelligence-lifecycle-management","description":"Implement a structured threat intelligence lifecycle encompassing planning, collection, processing, analysis,","domain":"cybersecurity","path":"skills/implementing-threat-intelligence-lifecycle-management"},{"name":"implementing-threat-modeling-with-mitre-attack","description":"'Implements threat modeling using the MITRE ATT&CK framework to map adversary TTPs against organizational assets,","domain":"cybersecurity","path":"skills/implementing-threat-modeling-with-mitre-attack"},{"name":"implementing-ticketing-system-for-incidents","description":"'Implements an integrated incident ticketing system connecting SIEM alerts to ServiceNow, Jira, or TheHive for","domain":"cybersecurity","path":"skills/implementing-ticketing-system-for-incidents"},{"name":"implementing-usb-device-control-policy","description":"'Implements USB device control policies to restrict unauthorized removable media access on endpoints, preventing","domain":"cybersecurity","path":"skills/implementing-usb-device-control-policy"},{"name":"implementing-velociraptor-for-ir-collection","description":"Deploy and configure Velociraptor for scalable endpoint forensic artifact collection during incident response","domain":"cybersecurity","path":"skills/implementing-velociraptor-for-ir-collection"},{"name":"implementing-vulnerability-management-with-greenbone","description":"Deploy and operate Greenbone/OpenVAS vulnerability management using the python-gvm library to create scan 
targets,","domain":"cybersecurity","path":"skills/implementing-vulnerability-management-with-greenbone"},{"name":"implementing-vulnerability-remediation-sla","description":"Vulnerability remediation SLAs define mandatory timeframes for patching or mitigating identified vulnerabilities","domain":"cybersecurity","path":"skills/implementing-vulnerability-remediation-sla"},{"name":"implementing-vulnerability-sla-breach-alerting","description":"Build automated alerting for vulnerability remediation SLA breaches with severity-based timelines, escalation","domain":"cybersecurity","path":"skills/implementing-vulnerability-sla-breach-alerting"},{"name":"implementing-web-application-logging-with-modsecurity","description":"'Configure ModSecurity WAF with OWASP Core Rule Set (CRS) for web application logging, tune rules to reduce false","domain":"cybersecurity","path":"skills/implementing-web-application-logging-with-modsecurity"},{"name":"implementing-zero-knowledge-proof-for-authentication","description":"Zero-Knowledge Proofs (ZKPs) allow a prover to demonstrate knowledge of a secret (such as a password or private","domain":"cybersecurity","path":"skills/implementing-zero-knowledge-proof-for-authentication"},{"name":"implementing-zero-standing-privilege-with-cyberark","description":"Deploy CyberArk Secure Cloud Access to eliminate standing privileges in hybrid and multi-cloud environments using","domain":"cybersecurity","path":"skills/implementing-zero-standing-privilege-with-cyberark"},{"name":"implementing-zero-trust-dns-with-nextdns","description":"Implement NextDNS as a zero trust DNS filtering layer with encrypted resolution, threat intelligence blocking,","domain":"cybersecurity","path":"skills/implementing-zero-trust-dns-with-nextdns"},{"name":"implementing-zero-trust-for-saas-applications","description":"'Implementing zero trust access controls for SaaS applications using CASB, SSPM, conditional access 
policies,","domain":"cybersecurity","path":"skills/implementing-zero-trust-for-saas-applications"},{"name":"implementing-zero-trust-in-cloud","description":"'This skill guides organizations through implementing zero trust architecture in cloud environments following","domain":"cybersecurity","path":"skills/implementing-zero-trust-in-cloud"},{"name":"implementing-zero-trust-network-access","description":"'Implementing Zero Trust Network Access (ZTNA) in cloud environments by configuring identity-aware proxies, micro-segmentation,","domain":"cybersecurity","path":"skills/implementing-zero-trust-network-access"},{"name":"implementing-zero-trust-network-access-with-zscaler","description":"Implement Zero Trust Network Access using Zscaler Private Access (ZPA) to replace traditional VPN with identity-based,","domain":"cybersecurity","path":"skills/implementing-zero-trust-network-access-with-zscaler"},{"name":"implementing-zero-trust-with-beyondcorp","description":"Deploy Google BeyondCorp Enterprise zero trust access controls using Identity-Aware Proxy (IAP), context-aware","domain":"cybersecurity","path":"skills/implementing-zero-trust-with-beyondcorp"},{"name":"implementing-zero-trust-with-hashicorp-boundary","description":"Implement HashiCorp Boundary for identity-aware zero trust infrastructure access management with dynamic credential","domain":"cybersecurity","path":"skills/implementing-zero-trust-with-hashicorp-boundary"},{"name":"integrating-dast-with-owasp-zap-in-pipeline","description":"'This skill covers integrating OWASP ZAP (Zed Attack Proxy) for Dynamic Application Security Testing in CI/CD","domain":"cybersecurity","path":"skills/integrating-dast-with-owasp-zap-in-pipeline"},{"name":"integrating-sast-into-github-actions-pipeline","description":"'This skill covers integrating Static Application Security Testing (SAST) tools\u2014CodeQL and Semgrep\u2014into 
GitHub","domain":"cybersecurity","path":"skills/integrating-sast-into-github-actions-pipeline"},{"name":"intercepting-mobile-traffic-with-burpsuite","description":"'Intercepts and analyzes HTTP/HTTPS traffic from mobile applications using Burp Suite proxy to identify insecure","domain":"cybersecurity","path":"skills/intercepting-mobile-traffic-with-burpsuite"},{"name":"investigating-insider-threat-indicators","description":"'Investigates insider threat indicators including data exfiltration attempts, unauthorized access patterns, policy","domain":"cybersecurity","path":"skills/investigating-insider-threat-indicators"},{"name":"investigating-phishing-email-incident","description":"'Investigates phishing email incidents from initial user report through header analysis, URL/attachment detonation,","domain":"cybersecurity","path":"skills/investigating-phishing-email-incident"},{"name":"investigating-ransomware-attack-artifacts","description":"Identify, collect, and analyze ransomware attack artifacts to determine the variant, initial access vector, encryption","domain":"cybersecurity","path":"skills/investigating-ransomware-attack-artifacts"},{"name":"managing-cloud-identity-with-okta","description":"'This skill covers implementing Okta as a centralized identity provider for cloud environments, configuring SSO","domain":"cybersecurity","path":"skills/managing-cloud-identity-with-okta"},{"name":"managing-intelligence-lifecycle","description":"'Manages the end-to-end cyber threat intelligence lifecycle from planning and direction through collection, processing,","domain":"cybersecurity","path":"skills/managing-intelligence-lifecycle"},{"name":"mapping-mitre-attack-techniques","description":"'Maps observed adversary behaviors, security alerts, and detection rules to MITRE ATT&CK techniques and sub-techniques","domain":"cybersecurity","path":"skills/mapping-mitre-attack-techniques"},{"name":"monitoring-darkweb-sources","description":"'Monitors dark web forums, 
marketplaces, paste sites, and ransomware leak sites for mentions of organizational","domain":"cybersecurity","path":"skills/monitoring-darkweb-sources"},{"name":"monitoring-scada-modbus-traffic-anomalies","description":"'Monitors Modbus TCP traffic on SCADA and ICS networks to detect anomalous function code usage, unauthorized","domain":"cybersecurity","path":"skills/monitoring-scada-modbus-traffic-anomalies"},{"name":"performing-access-recertification-with-saviynt","description":"Configure and execute access recertification campaigns in Saviynt Enterprise Identity Cloud to validate user","domain":"cybersecurity","path":"skills/performing-access-recertification-with-saviynt"},{"name":"performing-access-review-and-certification","description":"Conduct systematic access reviews and certifications to ensure users have appropriate access rights aligned with","domain":"cybersecurity","path":"skills/performing-access-review-and-certification"},{"name":"performing-active-directory-bloodhound-analysis","description":"Use BloodHound and SharpHound to enumerate Active Directory relationships and identify attack paths from compromised","domain":"cybersecurity","path":"skills/performing-active-directory-bloodhound-analysis"},{"name":"performing-active-directory-compromise-investigation","description":"Investigate Active Directory compromise by analyzing authentication logs, replication metadata, Group Policy","domain":"cybersecurity","path":"skills/performing-active-directory-compromise-investigation"},{"name":"performing-active-directory-forest-trust-attack","description":"Enumerate and audit Active Directory forest trust relationships using impacket for SID filtering analysis, trust","domain":"cybersecurity","path":"skills/performing-active-directory-forest-trust-attack"},{"name":"performing-active-directory-penetration-test","description":"Conduct a focused Active Directory penetration test to enumerate domain objects, discover attack paths with 
BloodHound,","domain":"cybersecurity","path":"skills/performing-active-directory-penetration-test"},{"name":"performing-active-directory-vulnerability-assessment","description":"Assess Active Directory security posture using PingCastle, BloodHound, and Purple Knight to identify misconfigurations,","domain":"cybersecurity","path":"skills/performing-active-directory-vulnerability-assessment"},{"name":"performing-adversary-in-the-middle-phishing-detection","description":"Detect and respond to Adversary-in-the-Middle (AiTM) phishing attacks that use reverse proxy kits like EvilProxy,","domain":"cybersecurity","path":"skills/performing-adversary-in-the-middle-phishing-detection"},{"name":"performing-agentless-vulnerability-scanning","description":"Configure and execute agentless vulnerability scanning using network protocols, cloud snapshot analysis, and","domain":"cybersecurity","path":"skills/performing-agentless-vulnerability-scanning"},{"name":"performing-ai-driven-osint-correlation","description":"Use AI and LLM-based reasoning to correlate findings across multiple OSINT sources\u2014username enumeration, email","domain":"cybersecurity","path":"skills/performing-ai-driven-osint-correlation"},{"name":"performing-alert-triage-with-elastic-siem","description":"Perform systematic alert triage in Elastic Security SIEM to rapidly classify, prioritize, and investigate security","domain":"cybersecurity","path":"skills/performing-alert-triage-with-elastic-siem"},{"name":"performing-android-app-static-analysis-with-mobsf","description":"'Performs automated static analysis of Android applications using Mobile Security Framework (MobSF) to identify","domain":"cybersecurity","path":"skills/performing-android-app-static-analysis-with-mobsf"},{"name":"performing-api-fuzzing-with-restler","description":"'Uses Microsoft RESTler to perform stateful REST API fuzzing by automatically generating and executing test 
sequences","domain":"cybersecurity","path":"skills/performing-api-fuzzing-with-restler"},{"name":"performing-api-inventory-and-discovery","description":"'Performs API inventory and discovery to identify all API endpoints in an organization''s environment including","domain":"cybersecurity","path":"skills/performing-api-inventory-and-discovery"},{"name":"performing-api-rate-limiting-bypass","description":"'Tests API rate limiting implementations for bypass vulnerabilities by manipulating request headers, IP addresses,","domain":"cybersecurity","path":"skills/performing-api-rate-limiting-bypass"},{"name":"performing-api-security-testing-with-postman","description":"'Uses Postman to perform structured API security testing by building collections that test for OWASP API Security","domain":"cybersecurity","path":"skills/performing-api-security-testing-with-postman"},{"name":"performing-arp-spoofing-attack-simulation","description":"'Simulates ARP spoofing attacks in authorized lab or pentest environments using arpspoof, Ettercap, and Scapy","domain":"cybersecurity","path":"skills/performing-arp-spoofing-attack-simulation"},{"name":"performing-asset-criticality-scoring-for-vulns","description":"Develop and apply a multi-factor asset criticality scoring model to weight vulnerability prioritization based","domain":"cybersecurity","path":"skills/performing-asset-criticality-scoring-for-vulns"},{"name":"performing-authenticated-scan-with-openvas","description":"Configure and execute authenticated vulnerability scans using OpenVAS/Greenbone Vulnerability Management with","domain":"cybersecurity","path":"skills/performing-authenticated-scan-with-openvas"},{"name":"performing-authenticated-vulnerability-scan","description":"Authenticated (credentialed) vulnerability scanning uses valid system credentials to log into target hosts 
and","domain":"cybersecurity","path":"skills/performing-authenticated-vulnerability-scan"},{"name":"performing-automated-malware-analysis-with-cape","description":"Deploy and operate CAPEv2 sandbox for automated malware analysis with behavioral monitoring, payload extraction,","domain":"cybersecurity","path":"skills/performing-automated-malware-analysis-with-cape"},{"name":"performing-aws-account-enumeration-with-scout-suite","description":"Perform comprehensive security posture assessment of AWS accounts using ScoutSuite to enumerate resources, identify","domain":"cybersecurity","path":"skills/performing-aws-account-enumeration-with-scout-suite"},{"name":"performing-aws-privilege-escalation-assessment","description":"'Performing authorized privilege escalation assessments in AWS environments to identify IAM misconfigurations","domain":"cybersecurity","path":"skills/performing-aws-privilege-escalation-assessment"},{"name":"performing-bandwidth-throttling-attack-simulation","description":"'Simulates bandwidth throttling and network degradation attacks using tc, iperf3, and Scapy in authorized environments","domain":"cybersecurity","path":"skills/performing-bandwidth-throttling-attack-simulation"},{"name":"performing-binary-exploitation-analysis","description":"'Analyze binary exploitation techniques including buffer overflows and ROP chains using pwntools Python library.","domain":"cybersecurity","path":"skills/performing-binary-exploitation-analysis"},{"name":"performing-blind-ssrf-exploitation","description":"Detect and exploit blind Server-Side Request Forgery vulnerabilities using out-of-band techniques, DNS interactions,","domain":"cybersecurity","path":"skills/performing-blind-ssrf-exploitation"},{"name":"performing-bluetooth-security-assessment","description":"Assess Bluetooth Low Energy device security by scanning, enumerating GATT services, and detecting 
vulnerabilities","domain":"cybersecurity","path":"skills/performing-bluetooth-security-assessment"},{"name":"performing-brand-monitoring-for-impersonation","description":"Monitor for brand impersonation attacks across domains, social media, mobile apps, and dark web channels to detect","domain":"cybersecurity","path":"skills/performing-brand-monitoring-for-impersonation"},{"name":"performing-clickjacking-attack-test","description":"Testing web applications for clickjacking vulnerabilities by assessing frame embedding controls and crafting","domain":"cybersecurity","path":"skills/performing-clickjacking-attack-test"},{"name":"performing-cloud-asset-inventory-with-cartography","description":"Perform comprehensive cloud asset inventory and relationship mapping using Cartography to build a Neo4j security","domain":"cybersecurity","path":"skills/performing-cloud-asset-inventory-with-cartography"},{"name":"performing-cloud-forensics-investigation","description":"Conduct forensic investigations in cloud environments by collecting and analyzing logs, snapshots, and metadata","domain":"cybersecurity","path":"skills/performing-cloud-forensics-investigation"},{"name":"performing-cloud-forensics-with-aws-cloudtrail","description":"Perform forensic investigation of AWS environments using CloudTrail logs to reconstruct attacker activity, identify","domain":"cybersecurity","path":"skills/performing-cloud-forensics-with-aws-cloudtrail"},{"name":"performing-cloud-incident-containment-procedures","description":"Execute cloud-native incident containment across AWS, Azure, and GCP by isolating compromised resources, revoking","domain":"cybersecurity","path":"skills/performing-cloud-incident-containment-procedures"},{"name":"performing-cloud-log-forensics-with-athena","description":"'Uses AWS Athena to query CloudTrail, VPC Flow Logs, S3 access logs, and ALB logs for forensic 
investigation.","domain":"cybersecurity","path":"skills/performing-cloud-log-forensics-with-athena"},{"name":"performing-cloud-native-forensics-with-falco","description":"'Uses Falco YAML rules for runtime threat detection in containers and Kubernetes, monitoring syscalls for shell","domain":"cybersecurity","path":"skills/performing-cloud-native-forensics-with-falco"},{"name":"performing-cloud-native-threat-hunting-with-aws-detective","description":"Hunt for threats in AWS environments using Detective behavior graphs, entity investigation timelines, GuardDuty","domain":"cybersecurity","path":"skills/performing-cloud-native-threat-hunting-with-aws-detective"},{"name":"performing-cloud-penetration-testing-with-pacu","description":"'Performing authorized AWS penetration testing using Pacu, the open-source AWS exploitation framework, to enumerate","domain":"cybersecurity","path":"skills/performing-cloud-penetration-testing-with-pacu"},{"name":"performing-cloud-storage-forensic-acquisition","description":"Perform forensic acquisition and analysis of cloud storage services including Google Drive, OneDrive, Dropbox,","domain":"cybersecurity","path":"skills/performing-cloud-storage-forensic-acquisition"},{"name":"performing-container-escape-detection","description":"'Detects container escape attempts by analyzing namespace configurations, privileged container checks, dangerous","domain":"cybersecurity","path":"skills/performing-container-escape-detection"},{"name":"performing-container-image-hardening","description":"'This skill covers hardening container images by minimizing attack surface, removing unnecessary packages, implementing","domain":"cybersecurity","path":"skills/performing-container-image-hardening"},{"name":"performing-container-security-scanning-with-trivy","description":"Scan container images, filesystems, and Kubernetes manifests for vulnerabilities, misconfigurations, 
exposed","domain":"cybersecurity","path":"skills/performing-container-security-scanning-with-trivy"},{"name":"performing-content-security-policy-bypass","description":"Analyze and bypass Content Security Policy implementations to achieve cross-site scripting by exploiting misconfigurations,","domain":"cybersecurity","path":"skills/performing-content-security-policy-bypass"},{"name":"performing-credential-access-with-lazagne","description":"Extract stored credentials from compromised endpoints using the LaZagne post-exploitation tool to recover passwords","domain":"cybersecurity","path":"skills/performing-credential-access-with-lazagne"},{"name":"performing-cryptographic-audit-of-application","description":"A cryptographic audit systematically reviews an application's use of cryptographic primitives, protocols, and","domain":"cybersecurity","path":"skills/performing-cryptographic-audit-of-application"},{"name":"performing-csrf-attack-simulation","description":"Testing web applications for Cross-Site Request Forgery vulnerabilities by crafting forged requests that exploit","domain":"cybersecurity","path":"skills/performing-csrf-attack-simulation"},{"name":"performing-cve-prioritization-with-kev-catalog","description":"Leverage the CISA Known Exploited Vulnerabilities catalog alongside EPSS and CVSS to prioritize CVE remediation","domain":"cybersecurity","path":"skills/performing-cve-prioritization-with-kev-catalog"},{"name":"performing-dark-web-monitoring-for-threats","description":"Dark web monitoring involves systematically scanning Tor hidden services, underground forums, paste sites, and","domain":"cybersecurity","path":"skills/performing-dark-web-monitoring-for-threats"},{"name":"performing-deception-technology-deployment","description":"'Deploys deception technology including honeypots, honeytokens, and decoy systems to detect attackers who 
have","domain":"cybersecurity","path":"skills/performing-deception-technology-deployment"},{"name":"performing-directory-traversal-testing","description":"Testing web applications for path traversal vulnerabilities that allow reading or writing arbitrary files on","domain":"cybersecurity","path":"skills/performing-directory-traversal-testing"},{"name":"performing-disk-forensics-investigation","description":"'Conducts disk forensics investigations using forensic imaging, file system analysis, artifact recovery, and","domain":"cybersecurity","path":"skills/performing-disk-forensics-investigation"},{"name":"performing-dmarc-policy-enforcement-rollout","description":"Execute a phased DMARC rollout from p=none monitoring through p=quarantine to p=reject enforcement, ensuring","domain":"cybersecurity","path":"skills/performing-dmarc-policy-enforcement-rollout"},{"name":"performing-dns-enumeration-and-zone-transfer","description":"'Enumerates DNS records, attempts zone transfers, brute-forces subdomains, and maps DNS infrastructure during","domain":"cybersecurity","path":"skills/performing-dns-enumeration-and-zone-transfer"},{"name":"performing-dns-tunneling-detection","description":"'Detects DNS tunneling by computing Shannon entropy of DNS query names, analyzing query length distributions,","domain":"cybersecurity","path":"skills/performing-dns-tunneling-detection"},{"name":"performing-docker-bench-security-assessment","description":"Docker Bench for Security is an open-source script that checks dozens of common best practices around deploying","domain":"cybersecurity","path":"skills/performing-docker-bench-security-assessment"},{"name":"performing-dynamic-analysis-of-android-app","description":"'Performs runtime dynamic analysis of Android applications using Frida, Objection, and Android Debug Bridge to","domain":"cybersecurity","path":"skills/performing-dynamic-analysis-of-android-app"},{"name":"performing-dynamic-analysis-with-any-run","description":"'Performs 
interactive dynamic malware analysis using the ANY.RUN cloud sandbox to observe real-time execution","domain":"cybersecurity","path":"skills/performing-dynamic-analysis-with-any-run"},{"name":"performing-endpoint-forensics-investigation","description":"'Performs digital forensics investigation on compromised endpoints including memory acquisition, disk imaging,","domain":"cybersecurity","path":"skills/performing-endpoint-forensics-investigation"},{"name":"performing-endpoint-vulnerability-remediation","description":"'Performs vulnerability remediation on endpoints by prioritizing CVEs based on risk scoring, deploying patches,","domain":"cybersecurity","path":"skills/performing-endpoint-vulnerability-remediation"},{"name":"performing-entitlement-review-with-sailpoint-iiq","description":"'Performs entitlement review and access certification campaigns using SailPoint IdentityIQ including manager","domain":"cybersecurity","path":"skills/performing-entitlement-review-with-sailpoint-iiq"},{"name":"performing-external-network-penetration-test","description":"Conduct a comprehensive external network penetration test to identify vulnerabilities in internet-facing infrastructure","domain":"cybersecurity","path":"skills/performing-external-network-penetration-test"},{"name":"performing-false-positive-reduction-in-siem","description":"Perform systematic SIEM false positive reduction through rule tuning, threshold adjustment, correlation refinement,","domain":"cybersecurity","path":"skills/performing-false-positive-reduction-in-siem"},{"name":"performing-file-carving-with-foremost","description":"Recover files from disk images and unallocated space using Foremost's header-footer signature carving to extract","domain":"cybersecurity","path":"skills/performing-file-carving-with-foremost"},{"name":"performing-firmware-extraction-with-binwalk","description":"'Performs firmware image extraction and analysis using binwalk to identify embedded filesystems, compressed 
archives,","domain":"cybersecurity","path":"skills/performing-firmware-extraction-with-binwalk"},{"name":"performing-firmware-malware-analysis","description":"'Analyzes firmware images for embedded malware, backdoors, and unauthorized modifications targeting routers,","domain":"cybersecurity","path":"skills/performing-firmware-malware-analysis"},{"name":"performing-fuzzing-with-aflplusplus","description":"'Perform coverage-guided fuzzing of compiled binaries using AFL++ (American Fuzzy Lop Plus Plus) to discover","domain":"cybersecurity","path":"skills/performing-fuzzing-with-aflplusplus"},{"name":"performing-gcp-penetration-testing-with-gcpbucketbrute","description":"Perform GCP security testing using GCPBucketBrute for storage bucket enumeration, gcloud IAM privilege escalation","domain":"cybersecurity","path":"skills/performing-gcp-penetration-testing-with-gcpbucketbrute"},{"name":"performing-gcp-security-assessment-with-forseti","description":"'Performing comprehensive security assessments of Google Cloud Platform environments using Forseti Security,","domain":"cybersecurity","path":"skills/performing-gcp-security-assessment-with-forseti"},{"name":"performing-graphql-depth-limit-attack","description":"Execute and test GraphQL depth limit attacks using deeply nested recursive queries to identify denial-of-service","domain":"cybersecurity","path":"skills/performing-graphql-depth-limit-attack"},{"name":"performing-graphql-introspection-attack","description":"'Performs GraphQL introspection attacks to extract the full API schema including types, queries, mutations, subscriptions,","domain":"cybersecurity","path":"skills/performing-graphql-introspection-attack"},{"name":"performing-graphql-security-assessment","description":"Assessing GraphQL API endpoints for introspection leaks, injection attacks, authorization flaws, and 
denial-of-service","domain":"cybersecurity","path":"skills/performing-graphql-security-assessment"},{"name":"performing-hardware-security-module-integration","description":"Integrate Hardware Security Modules (HSMs) using PKCS#11 interface for cryptographic key management, signing","domain":"cybersecurity","path":"skills/performing-hardware-security-module-integration"},{"name":"performing-hash-cracking-with-hashcat","description":"Hash cracking is an essential skill for penetration testers and security auditors to evaluate password strength.","domain":"cybersecurity","path":"skills/performing-hash-cracking-with-hashcat"},{"name":"performing-http-parameter-pollution-attack","description":"Execute HTTP Parameter Pollution attacks to bypass input validation, WAF rules, and security controls by injecting","domain":"cybersecurity","path":"skills/performing-http-parameter-pollution-attack"},{"name":"performing-ics-asset-discovery-with-claroty","description":"'Perform comprehensive ICS/OT asset discovery using Claroty xDome platform, leveraging passive monitoring, Claroty","domain":"cybersecurity","path":"skills/performing-ics-asset-discovery-with-claroty"},{"name":"performing-indicator-lifecycle-management","description":"Indicator lifecycle management tracks IOCs from initial discovery through validation, enrichment, deployment,","domain":"cybersecurity","path":"skills/performing-indicator-lifecycle-management"},{"name":"performing-initial-access-with-evilginx3","description":"Perform authorized initial access using EvilGinx3 adversary-in-the-middle phishing framework to capture session","domain":"cybersecurity","path":"skills/performing-initial-access-with-evilginx3"},{"name":"performing-insider-threat-investigation","description":"'Investigates insider threat incidents involving employees, contractors, or trusted partners who misuse 
authorized","domain":"cybersecurity","path":"skills/performing-insider-threat-investigation"},{"name":"performing-ioc-enrichment-automation","description":"'Automates Indicator of Compromise (IOC) enrichment by orchestrating lookups across VirusTotal, AbuseIPDB, Shodan,","domain":"cybersecurity","path":"skills/performing-ioc-enrichment-automation"},{"name":"performing-ios-app-security-assessment","description":"'Performs comprehensive iOS application security assessments using Frida for dynamic instrumentation, Objection","domain":"cybersecurity","path":"skills/performing-ios-app-security-assessment"},{"name":"performing-iot-security-assessment","description":"'Performs comprehensive security assessments of IoT devices and their ecosystems by testing hardware interfaces,","domain":"cybersecurity","path":"skills/performing-iot-security-assessment"},{"name":"performing-ip-reputation-analysis-with-shodan","description":"Analyze IP address reputation using the Shodan API to identify open ports, running services, known vulnerabilities,","domain":"cybersecurity","path":"skills/performing-ip-reputation-analysis-with-shodan"},{"name":"performing-jwt-none-algorithm-attack","description":"Execute and test the JWT none algorithm attack to bypass signature verification by manipulating the alg header","domain":"cybersecurity","path":"skills/performing-jwt-none-algorithm-attack"},{"name":"performing-kerberoasting-attack","description":"Kerberoasting is a post-exploitation technique that targets service accounts in Active Directory by requesting","domain":"cybersecurity","path":"skills/performing-kerberoasting-attack"},{"name":"performing-kubernetes-cis-benchmark-with-kube-bench","description":"Audit Kubernetes cluster security posture against CIS benchmarks using kube-bench with automated checks for control","domain":"cybersecurity","path":"skills/performing-kubernetes-cis-benchmark-with-kube-bench"},{"name":"performing-kubernetes-etcd-security-assessment","description":"Assess 
the security posture of Kubernetes etcd clusters by evaluating encryption at rest, TLS configuration,","domain":"cybersecurity","path":"skills/performing-kubernetes-etcd-security-assessment"},{"name":"performing-kubernetes-penetration-testing","description":"Kubernetes penetration testing systematically evaluates cluster security by simulating attacker techniques against","domain":"cybersecurity","path":"skills/performing-kubernetes-penetration-testing"},{"name":"performing-lateral-movement-detection","description":"'Detects lateral movement techniques including Pass-the-Hash, PsExec, WMI execution, RDP pivoting, and SMB-based","domain":"cybersecurity","path":"skills/performing-lateral-movement-detection"},{"name":"performing-lateral-movement-with-wmiexec","description":"Perform lateral movement across Windows networks using WMI-based remote execution techniques including Impacket","domain":"cybersecurity","path":"skills/performing-lateral-movement-with-wmiexec"},{"name":"performing-linux-log-forensics-investigation","description":"Perform forensic investigation of Linux system logs including syslog, auth.log, systemd journal, kern.log, and","domain":"cybersecurity","path":"skills/performing-linux-log-forensics-investigation"},{"name":"performing-log-analysis-for-forensic-investigation","description":"Collect, parse, and correlate system, application, and security logs to reconstruct events and establish timelines","domain":"cybersecurity","path":"skills/performing-log-analysis-for-forensic-investigation"},{"name":"performing-log-source-onboarding-in-siem","description":"Perform structured log source onboarding into SIEM platforms by configuring collectors, parsers, normalization,","domain":"cybersecurity","path":"skills/performing-log-source-onboarding-in-siem"},{"name":"performing-malware-hash-enrichment-with-virustotal","description":"Enrich malware file hashes using the VirusTotal API to retrieve detection rates, behavioral analysis, YARA 
matches,","domain":"cybersecurity","path":"skills/performing-malware-hash-enrichment-with-virustotal"},{"name":"performing-malware-ioc-extraction","description":"Malware IOC extraction is the process of analyzing malicious software to identify actionable indicators of compromise","domain":"cybersecurity","path":"skills/performing-malware-ioc-extraction"},{"name":"performing-malware-persistence-investigation","description":"Systematically investigate all persistence mechanisms on Windows and Linux systems to identify how malware survives","domain":"cybersecurity","path":"skills/performing-malware-persistence-investigation"},{"name":"performing-malware-triage-with-yara","description":"'Performs rapid malware triage and classification using YARA rules to match file patterns, strings, byte sequences,","domain":"cybersecurity","path":"skills/performing-malware-triage-with-yara"},{"name":"performing-memory-forensics-with-volatility3","description":"Analyze volatile memory dumps using Volatility 3 to extract running processes, network connections, loaded modules,","domain":"cybersecurity","path":"skills/performing-memory-forensics-with-volatility3"},{"name":"performing-memory-forensics-with-volatility3-plugins","description":"Analyze memory dumps using Volatility3 plugins to detect injected code, rootkits, credential theft, and malware","domain":"cybersecurity","path":"skills/performing-memory-forensics-with-volatility3-plugins"},{"name":"performing-mobile-app-certificate-pinning-bypass","description":"'Bypasses SSL/TLS certificate pinning implementations in Android and iOS applications to enable traffic interception","domain":"cybersecurity","path":"skills/performing-mobile-app-certificate-pinning-bypass"},{"name":"performing-mobile-device-forensics-with-cellebrite","description":"Acquire and analyze mobile device data using Cellebrite UFED and open-source tools to extract 
communications,","domain":"cybersecurity","path":"skills/performing-mobile-device-forensics-with-cellebrite"},{"name":"performing-network-forensics-with-wireshark","description":"Capture and analyze network traffic using Wireshark and tshark to reconstruct network events, extract artifacts,","domain":"cybersecurity","path":"skills/performing-network-forensics-with-wireshark"},{"name":"performing-network-packet-capture-analysis","description":"Perform forensic analysis of network packet captures (PCAP/PCAPNG) using Wireshark, tshark, and tcpdump to reconstruct","domain":"cybersecurity","path":"skills/performing-network-packet-capture-analysis"},{"name":"performing-network-traffic-analysis-with-tshark","description":"Automate network traffic analysis using tshark and pyshark for protocol statistics, suspicious flow detection,","domain":"cybersecurity","path":"skills/performing-network-traffic-analysis-with-tshark"},{"name":"performing-network-traffic-analysis-with-zeek","description":"Deploy Zeek network security monitor to capture, parse, and analyze network traffic metadata for threat detection,","domain":"cybersecurity","path":"skills/performing-network-traffic-analysis-with-zeek"},{"name":"performing-nist-csf-maturity-assessment","description":">-","domain":"cybersecurity","path":"skills/performing-nist-csf-maturity-assessment"},{"name":"performing-oauth-scope-minimization-review","description":"'Performs OAuth 2.0 scope minimization review to identify over-permissioned third-party application integrations,","domain":"cybersecurity","path":"skills/performing-oauth-scope-minimization-review"},{"name":"performing-oil-gas-cybersecurity-assessment","description":"'This skill covers conducting cybersecurity assessments specific to oil and gas facilities including upstream","domain":"cybersecurity","path":"skills/performing-oil-gas-cybersecurity-assessment"},{"name":"performing-open-source-intelligence-gathering","description":"Open Source Intelligence (OSINT) 
gathering is the first active phase of a red team engagement, where operators","domain":"cybersecurity","path":"skills/performing-open-source-intelligence-gathering"},{"name":"performing-osint-with-spiderfoot","description":"Automate OSINT collection using SpiderFoot REST API and CLI for target profiling, module-based reconnaissance,","domain":"cybersecurity","path":"skills/performing-osint-with-spiderfoot"},{"name":"performing-ot-network-security-assessment","description":"'This skill covers conducting comprehensive security assessments of Operational Technology (OT) networks including","domain":"cybersecurity","path":"skills/performing-ot-network-security-assessment"},{"name":"performing-ot-vulnerability-assessment-with-claroty","description":"'This skill covers performing vulnerability assessments in OT environments using the Claroty xDome platform for","domain":"cybersecurity","path":"skills/performing-ot-vulnerability-assessment-with-claroty"},{"name":"performing-ot-vulnerability-scanning-safely","description":"'Perform vulnerability scanning in OT/ICS environments safely using passive monitoring, native protocol queries,","domain":"cybersecurity","path":"skills/performing-ot-vulnerability-scanning-safely"},{"name":"performing-packet-injection-attack","description":"'Crafts and injects custom network packets using Scapy, hping3, and Nemesis during authorized security assessments","domain":"cybersecurity","path":"skills/performing-packet-injection-attack"},{"name":"performing-paste-site-monitoring-for-credentials","description":"Monitor paste sites like Pastebin and GitHub Gists for leaked credentials, API keys, and sensitive data dumps","domain":"cybersecurity","path":"skills/performing-paste-site-monitoring-for-credentials"},{"name":"performing-phishing-simulation-with-gophish","description":"GoPhish is an open-source phishing simulation framework used by security teams to conduct authorized 
phishing","domain":"cybersecurity","path":"skills/performing-phishing-simulation-with-gophish"},{"name":"performing-physical-intrusion-assessment","description":"Conduct authorized physical penetration testing using tailgating, badge cloning, lock bypassing, and rogue device","domain":"cybersecurity","path":"skills/performing-physical-intrusion-assessment"},{"name":"performing-plc-firmware-security-analysis","description":"'This skill covers analyzing Programmable Logic Controller (PLC) firmware for security vulnerabilities including","domain":"cybersecurity","path":"skills/performing-plc-firmware-security-analysis"},{"name":"performing-post-quantum-cryptography-migration","description":"'Assesses organizational readiness for post-quantum cryptography migration per NIST FIPS 203/204/205 standards.","domain":"cybersecurity","path":"skills/performing-post-quantum-cryptography-migration"},{"name":"performing-power-grid-cybersecurity-assessment","description":"'This skill covers conducting cybersecurity assessments of electric power grid infrastructure including generation","domain":"cybersecurity","path":"skills/performing-power-grid-cybersecurity-assessment"},{"name":"performing-privacy-impact-assessment","description":"'Automates the Privacy Impact Assessment (PIA) workflow including data flow mapping, privacy risk scoring matrices,","domain":"cybersecurity","path":"skills/performing-privacy-impact-assessment"},{"name":"performing-privilege-escalation-assessment","description":"'Performs privilege escalation assessments on compromised Linux and Windows systems to identify paths from low-privilege","domain":"cybersecurity","path":"skills/performing-privilege-escalation-assessment"},{"name":"performing-privilege-escalation-on-linux","description":"Linux privilege escalation involves elevating from a low-privilege user account to root access on a 
compromised","domain":"cybersecurity","path":"skills/performing-privilege-escalation-on-linux"},{"name":"performing-privileged-account-access-review","description":"Conduct systematic reviews of privileged accounts to validate access rights, identify excessive permissions,","domain":"cybersecurity","path":"skills/performing-privileged-account-access-review"},{"name":"performing-privileged-account-discovery","description":"Discover and inventory all privileged accounts across enterprise infrastructure including domain admins, local","domain":"cybersecurity","path":"skills/performing-privileged-account-discovery"},{"name":"performing-purple-team-atomic-testing","description":"'Executes Atomic Red Team tests mapped to MITRE ATT&CK techniques, performs coverage gap analysis across the","domain":"cybersecurity","path":"skills/performing-purple-team-atomic-testing"},{"name":"performing-purple-team-exercise","description":"'Performs purple team exercises by coordinating red team adversary emulation with blue team detection validation","domain":"cybersecurity","path":"skills/performing-purple-team-exercise"},{"name":"performing-ransomware-response","description":"'Executes a structured ransomware incident response from initial detection through containment, forensic analysis,","domain":"cybersecurity","path":"skills/performing-ransomware-response"},{"name":"performing-ransomware-tabletop-exercise","description":"'Plans and facilitates tabletop exercises simulating ransomware incidents to test organizational readiness, decision-making,","domain":"cybersecurity","path":"skills/performing-ransomware-tabletop-exercise"},{"name":"performing-red-team-phishing-with-gophish","description":"Automate GoPhish phishing simulation campaigns using the Python gophish library. 
Creates email templates with","domain":"cybersecurity","path":"skills/performing-red-team-phishing-with-gophish"},{"name":"performing-red-team-with-covenant","description":"Conduct red team operations using the Covenant C2 framework for authorized adversary simulation, including listener","domain":"cybersecurity","path":"skills/performing-red-team-with-covenant"},{"name":"performing-s7comm-protocol-security-analysis","description":"'Perform security analysis of Siemens S7comm and S7CommPlus protocols used by SIMATIC S7 PLCs to identify vulnerabilities","domain":"cybersecurity","path":"skills/performing-s7comm-protocol-security-analysis"},{"name":"performing-sca-dependency-scanning-with-snyk","description":"'This skill covers implementing Software Composition Analysis (SCA) using Snyk to detect vulnerable open-source","domain":"cybersecurity","path":"skills/performing-sca-dependency-scanning-with-snyk"},{"name":"performing-scada-hmi-security-assessment","description":"'Perform security assessments of SCADA Human-Machine Interface (HMI) systems to identify vulnerabilities in web-based","domain":"cybersecurity","path":"skills/performing-scada-hmi-security-assessment"},{"name":"performing-second-order-sql-injection","description":"Detect and exploit second-order SQL injection vulnerabilities where malicious input is stored in a database and","domain":"cybersecurity","path":"skills/performing-second-order-sql-injection"},{"name":"performing-security-headers-audit","description":"Auditing HTTP security headers including CSP, HSTS, X-Frame-Options, and cookie attributes to identify missing","domain":"cybersecurity","path":"skills/performing-security-headers-audit"},{"name":"performing-serverless-function-security-review","description":"'Performing security reviews of serverless functions across AWS Lambda, Azure Functions, and GCP Cloud 
Functions","domain":"cybersecurity","path":"skills/performing-serverless-function-security-review"},{"name":"performing-service-account-audit","description":"Audit service accounts across enterprise infrastructure to identify orphaned, over-privileged, and non-compliant","domain":"cybersecurity","path":"skills/performing-service-account-audit"},{"name":"performing-service-account-credential-rotation","description":"Automate credential rotation for service accounts across Active Directory, cloud platforms, and application databases","domain":"cybersecurity","path":"skills/performing-service-account-credential-rotation"},{"name":"performing-soap-web-service-security-testing","description":"Perform security testing of SOAP web services by analyzing WSDL definitions and testing for XML injection, XXE,","domain":"cybersecurity","path":"skills/performing-soap-web-service-security-testing"},{"name":"performing-soc-tabletop-exercise","description":"'Performs tabletop exercises for SOC teams simulating security incidents through discussion-based scenarios to","domain":"cybersecurity","path":"skills/performing-soc-tabletop-exercise"},{"name":"performing-soc2-type2-audit-preparation","description":"'Automates SOC 2 Type II audit preparation including gap assessment against AICPA Trust Services Criteria (CC1-CC9),","domain":"cybersecurity","path":"skills/performing-soc2-type2-audit-preparation"},{"name":"performing-sqlite-database-forensics","description":"Perform forensic analysis of SQLite databases to recover deleted records from freelists and WAL files, decode","domain":"cybersecurity","path":"skills/performing-sqlite-database-forensics"},{"name":"performing-ssl-certificate-lifecycle-management","description":"SSL/TLS certificate lifecycle management encompasses the full process of requesting, issuing, deploying, 
monitoring,","domain":"cybersecurity","path":"skills/performing-ssl-certificate-lifecycle-management"},{"name":"performing-ssl-stripping-attack","description":"'Simulates SSL stripping attacks using sslstrip, Bettercap, and mitmproxy in authorized environments to test","domain":"cybersecurity","path":"skills/performing-ssl-stripping-attack"},{"name":"performing-ssl-tls-inspection-configuration","description":"Configure SSL/TLS inspection on network security devices to decrypt, inspect, and re-encrypt HTTPS traffic for","domain":"cybersecurity","path":"skills/performing-ssl-tls-inspection-configuration"},{"name":"performing-ssl-tls-security-assessment","description":"Assess SSL/TLS server configurations using the sslyze Python library to evaluate cipher suites, certificate chains,","domain":"cybersecurity","path":"skills/performing-ssl-tls-security-assessment"},{"name":"performing-ssrf-vulnerability-exploitation","description":"Test for Server-Side Request Forgery vulnerabilities by probing cloud metadata endpoints, internal network services,","domain":"cybersecurity","path":"skills/performing-ssrf-vulnerability-exploitation"},{"name":"performing-static-malware-analysis-with-pe-studio","description":"'Performs static analysis of Windows PE (Portable Executable) malware samples using PEStudio to examine file","domain":"cybersecurity","path":"skills/performing-static-malware-analysis-with-pe-studio"},{"name":"performing-steganography-detection","description":"Detect and extract hidden data embedded in images, audio, and other media files using steganalysis tools to uncover","domain":"cybersecurity","path":"skills/performing-steganography-detection"},{"name":"performing-subdomain-enumeration-with-subfinder","description":"Enumerate subdomains of target domains using ProjectDiscovery's Subfinder passive reconnaissance tool to 
map","domain":"cybersecurity","path":"skills/performing-subdomain-enumeration-with-subfinder"},{"name":"performing-supply-chain-attack-simulation","description":"Simulate and detect software supply chain attacks including typosquatting detection via Levenshtein distance,","domain":"cybersecurity","path":"skills/performing-supply-chain-attack-simulation"},{"name":"performing-thick-client-application-penetration-test","description":"Conduct a thick client application penetration test to identify insecure local storage, hardcoded credentials,","domain":"cybersecurity","path":"skills/performing-thick-client-application-penetration-test"},{"name":"performing-threat-emulation-with-atomic-red-team","description":"'Executes Atomic Red Team tests for MITRE ATT&CK technique validation using the atomic-operator Python framework.","domain":"cybersecurity","path":"skills/performing-threat-emulation-with-atomic-red-team"},{"name":"performing-threat-hunting-with-elastic-siem","description":"'Performs proactive threat hunting in Elastic Security SIEM using KQL/EQL queries, detection rules, and Timeline","domain":"cybersecurity","path":"skills/performing-threat-hunting-with-elastic-siem"},{"name":"performing-threat-hunting-with-yara-rules","description":"'Use YARA pattern-matching rules to hunt for malware, suspicious files, and indicators of compromise across filesystems","domain":"cybersecurity","path":"skills/performing-threat-hunting-with-yara-rules"},{"name":"performing-threat-intelligence-sharing-with-misp","description":"Use PyMISP to create, enrich, and share threat intelligence events on a MISP platform, including IOC management,","domain":"cybersecurity","path":"skills/performing-threat-intelligence-sharing-with-misp"},{"name":"performing-threat-landscape-assessment-for-sector","description":"Conduct a sector-specific threat landscape assessment by analyzing threat actor targeting patterns, common 
attack","domain":"cybersecurity","path":"skills/performing-threat-landscape-assessment-for-sector"},{"name":"performing-threat-modeling-with-owasp-threat-dragon","description":"Use OWASP Threat Dragon to create data flow diagrams, identify threats using STRIDE and LINDDUN methodologies,","domain":"cybersecurity","path":"skills/performing-threat-modeling-with-owasp-threat-dragon"},{"name":"performing-timeline-reconstruction-with-plaso","description":"Build comprehensive forensic super-timelines using Plaso (log2timeline) to correlate events across file systems,","domain":"cybersecurity","path":"skills/performing-timeline-reconstruction-with-plaso"},{"name":"performing-user-behavior-analytics","description":"'Performs User and Entity Behavior Analytics (UEBA) to detect anomalous user activities including impossible","domain":"cybersecurity","path":"skills/performing-user-behavior-analytics"},{"name":"performing-vlan-hopping-attack","description":"'Simulates VLAN hopping attacks using switch spoofing and double tagging techniques in authorized environments","domain":"cybersecurity","path":"skills/performing-vlan-hopping-attack"},{"name":"performing-vulnerability-scanning-with-nessus","description":"'Performs authenticated and unauthenticated vulnerability scanning using Tenable Nessus to identify known vulnerabilities,","domain":"cybersecurity","path":"skills/performing-vulnerability-scanning-with-nessus"},{"name":"performing-web-application-firewall-bypass","description":"Bypass Web Application Firewall protections using encoding techniques, HTTP method manipulation, parameter pollution,","domain":"cybersecurity","path":"skills/performing-web-application-firewall-bypass"},{"name":"performing-web-application-penetration-test","description":"'Performs systematic security testing of web applications following the OWASP Web Security Testing Guide 
(WSTG)","domain":"cybersecurity","path":"skills/performing-web-application-penetration-test"},{"name":"performing-web-application-scanning-with-nikto","description":"Nikto is an open-source web server and web application scanner that tests against over 7,000 potentially dangerous","domain":"cybersecurity","path":"skills/performing-web-application-scanning-with-nikto"},{"name":"performing-web-application-vulnerability-triage","description":"Triage web application vulnerability findings from DAST/SAST scanners using OWASP risk rating methodology to","domain":"cybersecurity","path":"skills/performing-web-application-vulnerability-triage"},{"name":"performing-web-cache-deception-attack","description":"Execute web cache deception attacks by exploiting path normalization discrepancies between CDN caching layers","domain":"cybersecurity","path":"skills/performing-web-cache-deception-attack"},{"name":"performing-web-cache-poisoning-attack","description":"Exploiting web cache mechanisms to serve malicious content to other users by poisoning cached responses through","domain":"cybersecurity","path":"skills/performing-web-cache-poisoning-attack"},{"name":"performing-wifi-password-cracking-with-aircrack","description":"'Captures WPA/WPA2 handshakes and performs offline password cracking using aircrack-ng, hashcat, and dictionary","domain":"cybersecurity","path":"skills/performing-wifi-password-cracking-with-aircrack"},{"name":"performing-windows-artifact-analysis-with-eric-zimmerman-tools","description":"Perform comprehensive Windows forensic artifact analysis using Eric Zimmerman's open-source EZ Tools suite including","domain":"cybersecurity","path":"skills/performing-windows-artifact-analysis-with-eric-zimmerman-tools"},{"name":"performing-wireless-network-penetration-test","description":"Execute a wireless network penetration test to assess WiFi security by capturing handshakes, cracking 
WPA2/WPA3","domain":"cybersecurity","path":"skills/performing-wireless-network-penetration-test"},{"name":"performing-wireless-security-assessment-with-kismet","description":"Conduct wireless network security assessments using Kismet to detect rogue access points, hidden SSIDs, weak","domain":"cybersecurity","path":"skills/performing-wireless-security-assessment-with-kismet"},{"name":"performing-yara-rule-development-for-detection","description":"Develop precise YARA rules for malware detection by identifying unique byte patterns, strings, and behavioral","domain":"cybersecurity","path":"skills/performing-yara-rule-development-for-detection"},{"name":"prioritizing-vulnerabilities-with-cvss-scoring","description":"The Common Vulnerability Scoring System (CVSS) is the industry standard framework maintained by FIRST (Forum","domain":"cybersecurity","path":"skills/prioritizing-vulnerabilities-with-cvss-scoring"},{"name":"processing-stix-taxii-feeds","description":"'Processes STIX 2.1 threat intelligence bundles delivered via TAXII 2.1 servers, normalizing objects into platform-native","domain":"cybersecurity","path":"skills/processing-stix-taxii-feeds"},{"name":"profiling-threat-actor-groups","description":"'Develops comprehensive threat actor profiles for APT groups, criminal organizations, and hacktivist collectives","domain":"cybersecurity","path":"skills/profiling-threat-actor-groups"},{"name":"recovering-deleted-files-with-photorec","description":"Recover deleted files from disk images and storage media using PhotoRec's file signature-based carving engine","domain":"cybersecurity","path":"skills/recovering-deleted-files-with-photorec"},{"name":"recovering-from-ransomware-attack","description":"'Executes structured recovery from a ransomware incident following NIST and CISA frameworks, including environment","domain":"cybersecurity","path":"skills/recovering-from-ransomware-attack"},{"name":"remediating-s3-bucket-misconfiguration","description":"'This skill 
provides step-by-step procedures for identifying and remediating Amazon S3 bucket misconfigurations","domain":"cybersecurity","path":"skills/remediating-s3-bucket-misconfiguration"},{"name":"reverse-engineering-android-malware-with-jadx","description":"'Reverse engineers malicious Android APK files using JADX decompiler to analyze Java/Kotlin source code, identify","domain":"cybersecurity","path":"skills/reverse-engineering-android-malware-with-jadx"},{"name":"reverse-engineering-dotnet-malware-with-dnspy","description":"'Reverse engineers .NET malware using dnSpy decompiler and debugger to analyze C#/VB.NET source code, identify","domain":"cybersecurity","path":"skills/reverse-engineering-dotnet-malware-with-dnspy"},{"name":"reverse-engineering-ios-app-with-frida","description":"'Reverse engineers iOS applications using Frida dynamic instrumentation to understand internal logic, extract","domain":"cybersecurity","path":"skills/reverse-engineering-ios-app-with-frida"},{"name":"reverse-engineering-malware-with-ghidra","description":"'Reverse engineers malware binaries using NSA''s Ghidra disassembler and decompiler to understand internal logic,","domain":"cybersecurity","path":"skills/reverse-engineering-malware-with-ghidra"},{"name":"reverse-engineering-ransomware-encryption-routine","description":"Reverse engineer ransomware encryption routines to identify cryptographic algorithms, key generation flaws, and","domain":"cybersecurity","path":"skills/reverse-engineering-ransomware-encryption-routine"},{"name":"reverse-engineering-rust-malware","description":"Reverse engineer Rust-compiled malware using IDA Pro and Ghidra with techniques for handling non-null-terminated","domain":"cybersecurity","path":"skills/reverse-engineering-rust-malware"},{"name":"scanning-container-images-with-grype","description":"Scan container images for known vulnerabilities using Anchore Grype with SBOM-based matching and 
configurable","domain":"cybersecurity","path":"skills/scanning-container-images-with-grype"},{"name":"scanning-containers-with-trivy-in-cicd","description":"'This skill covers integrating Aqua Security''s Trivy scanner into CI/CD pipelines for comprehensive container","domain":"cybersecurity","path":"skills/scanning-containers-with-trivy-in-cicd"},{"name":"scanning-docker-images-with-trivy","description":"Trivy is a comprehensive open-source vulnerability scanner by Aqua Security that detects vulnerabilities in OS","domain":"cybersecurity","path":"skills/scanning-docker-images-with-trivy"},{"name":"scanning-infrastructure-with-nessus","description":"Tenable Nessus is the industry-leading vulnerability scanner used to identify security weaknesses across network","domain":"cybersecurity","path":"skills/scanning-infrastructure-with-nessus"},{"name":"scanning-kubernetes-manifests-with-kubesec","description":"Perform security risk analysis on Kubernetes resource manifests using Kubesec to identify misconfigurations,","domain":"cybersecurity","path":"skills/scanning-kubernetes-manifests-with-kubesec"},{"name":"scanning-network-with-nmap-advanced","description":"'Performs advanced network reconnaissance using Nmap''s scripting engine, timing controls, evasion techniques,","domain":"cybersecurity","path":"skills/scanning-network-with-nmap-advanced"},{"name":"securing-api-gateway-with-aws-waf","description":"'Securing API Gateway endpoints with AWS WAF by configuring managed rule groups for OWASP Top 10 protection,","domain":"cybersecurity","path":"skills/securing-api-gateway-with-aws-waf"},{"name":"securing-aws-iam-permissions","description":"'This skill guides practitioners through hardening AWS Identity and Access Management configurations to enforce","domain":"cybersecurity","path":"skills/securing-aws-iam-permissions"},{"name":"securing-aws-lambda-execution-roles","description":"'Securing AWS Lambda execution roles by implementing least-privilege IAM policies, applying 
permission boundaries,","domain":"cybersecurity","path":"skills/securing-aws-lambda-execution-roles"},{"name":"securing-azure-with-microsoft-defender","description":"'This skill instructs security practitioners on deploying Microsoft Defender for Cloud as a cloud-native application","domain":"cybersecurity","path":"skills/securing-azure-with-microsoft-defender"},{"name":"securing-container-registry-images","description":"'Securing container registry images by implementing vulnerability scanning with Trivy and Grype, enforcing image","domain":"cybersecurity","path":"skills/securing-container-registry-images"},{"name":"securing-container-registry-with-harbor","description":"Harbor is an open-source container registry that provides security features including vulnerability scanning","domain":"cybersecurity","path":"skills/securing-container-registry-with-harbor"},{"name":"securing-github-actions-workflows","description":"'This skill covers hardening GitHub Actions workflows against supply chain attacks, credential theft, and privilege","domain":"cybersecurity","path":"skills/securing-github-actions-workflows"},{"name":"securing-helm-chart-deployments","description":"Secure Helm chart deployments by validating chart integrity, scanning templates for misconfigurations, and enforcing","domain":"cybersecurity","path":"skills/securing-helm-chart-deployments"},{"name":"securing-historian-server-in-ot-environment","description":"'This skill covers hardening and securing process historian servers (OSIsoft PI, Honeywell PHD, GE Proficy, AVEVA","domain":"cybersecurity","path":"skills/securing-historian-server-in-ot-environment"},{"name":"securing-kubernetes-on-cloud","description":"'This skill covers hardening managed Kubernetes clusters on EKS, AKS, and GKE by implementing Pod Security Standards,","domain":"cybersecurity","path":"skills/securing-kubernetes-on-cloud"},{"name":"securing-remote-access-to-ot-environment","description":"'This skill covers implementing secure remote 
access to OT/ICS environments for operators, engineers, and vendors","domain":"cybersecurity","path":"skills/securing-remote-access-to-ot-environment"},{"name":"securing-serverless-functions","description":"'This skill covers security hardening for serverless compute platforms including AWS Lambda, Azure Functions,","domain":"cybersecurity","path":"skills/securing-serverless-functions"},{"name":"testing-android-intents-for-vulnerabilities","description":"'Tests Android inter-process communication (IPC) through intents for vulnerabilities including intent injection,","domain":"cybersecurity","path":"skills/testing-android-intents-for-vulnerabilities"},{"name":"testing-api-authentication-weaknesses","description":"'Tests API authentication mechanisms for weaknesses including broken token validation, missing authentication","domain":"cybersecurity","path":"skills/testing-api-authentication-weaknesses"},{"name":"testing-api-for-broken-object-level-authorization","description":"'Tests REST and GraphQL APIs for Broken Object Level Authorization (BOLA/IDOR) vulnerabilities where an authenticated","domain":"cybersecurity","path":"skills/testing-api-for-broken-object-level-authorization"},{"name":"testing-api-for-mass-assignment-vulnerability","description":"'Tests APIs for mass assignment (auto-binding) vulnerabilities where clients can modify object properties they","domain":"cybersecurity","path":"skills/testing-api-for-mass-assignment-vulnerability"},{"name":"testing-api-security-with-owasp-top-10","description":"Systematically assessing REST and GraphQL API endpoints against the OWASP API Security Top 10 risks using automated","domain":"cybersecurity","path":"skills/testing-api-security-with-owasp-top-10"},{"name":"testing-cors-misconfiguration","description":"Identifying and exploiting Cross-Origin Resource Sharing misconfigurations that allow unauthorized 
cross-domain","domain":"cybersecurity","path":"skills/testing-cors-misconfiguration"},{"name":"testing-for-broken-access-control","description":"Systematically testing web applications for broken access control vulnerabilities including privilege escalation,","domain":"cybersecurity","path":"skills/testing-for-broken-access-control"},{"name":"testing-for-business-logic-vulnerabilities","description":"Identifying flaws in application business logic that allow price manipulation, workflow bypass, and privilege","domain":"cybersecurity","path":"skills/testing-for-business-logic-vulnerabilities"},{"name":"testing-for-email-header-injection","description":"Test web application email functionality for SMTP header injection vulnerabilities that allow attackers to inject","domain":"cybersecurity","path":"skills/testing-for-email-header-injection"},{"name":"testing-for-host-header-injection","description":"Test web applications for HTTP Host header injection vulnerabilities to identify password reset poisoning, web","domain":"cybersecurity","path":"skills/testing-for-host-header-injection"},{"name":"testing-for-json-web-token-vulnerabilities","description":"Test JWT implementations for critical vulnerabilities including algorithm confusion, none algorithm bypass, kid","domain":"cybersecurity","path":"skills/testing-for-json-web-token-vulnerabilities"},{"name":"testing-for-open-redirect-vulnerabilities","description":"Identify and test open redirect vulnerabilities in web applications by analyzing URL redirection parameters,","domain":"cybersecurity","path":"skills/testing-for-open-redirect-vulnerabilities"},{"name":"testing-for-sensitive-data-exposure","description":"Identifying sensitive data exposure vulnerabilities including API key leakage, PII in responses, insecure storage,","domain":"cybersecurity","path":"skills/testing-for-sensitive-data-exposure"},{"name":"testing-for-xml-injection-vulnerabilities","description":"Test web applications for XML injection 
vulnerabilities including XXE, XPath injection, and XML entity attacks","domain":"cybersecurity","path":"skills/testing-for-xml-injection-vulnerabilities"},{"name":"testing-for-xss-vulnerabilities","description":"'Tests web applications for Cross-Site Scripting (XSS) vulnerabilities by injecting JavaScript payloads into","domain":"cybersecurity","path":"skills/testing-for-xss-vulnerabilities"},{"name":"testing-for-xss-vulnerabilities-with-burpsuite","description":"Identifying and validating cross-site scripting vulnerabilities using Burp Suite's scanner, intruder, and repeater","domain":"cybersecurity","path":"skills/testing-for-xss-vulnerabilities-with-burpsuite"},{"name":"testing-for-xxe-injection-vulnerabilities","description":"Discovering and exploiting XML External Entity injection vulnerabilities to read server files, perform SSRF,","domain":"cybersecurity","path":"skills/testing-for-xxe-injection-vulnerabilities"},{"name":"testing-jwt-token-security","description":"Assessing JSON Web Token implementations for cryptographic weaknesses, algorithm confusion attacks, and authorization","domain":"cybersecurity","path":"skills/testing-jwt-token-security"},{"name":"testing-mobile-api-authentication","description":"'Tests authentication and authorization mechanisms in mobile application APIs to identify broken authentication,","domain":"cybersecurity","path":"skills/testing-mobile-api-authentication"},{"name":"testing-oauth2-implementation-flaws","description":"'Tests OAuth 2.0 and OpenID Connect implementations for security flaws including authorization code interception,","domain":"cybersecurity","path":"skills/testing-oauth2-implementation-flaws"},{"name":"testing-ransomware-recovery-procedures","description":"Test and validate ransomware recovery procedures including backup restore operations, RTO/RPO target 
verification,","domain":"cybersecurity","path":"skills/testing-ransomware-recovery-procedures"},{"name":"testing-websocket-api-security","description":"'Tests WebSocket API implementations for security vulnerabilities including missing authentication on WebSocket","domain":"cybersecurity","path":"skills/testing-websocket-api-security"},{"name":"tracking-threat-actor-infrastructure","description":"Threat actor infrastructure tracking involves monitoring and mapping adversary-controlled assets including command-and-control","domain":"cybersecurity","path":"skills/tracking-threat-actor-infrastructure"},{"name":"triaging-security-alerts-in-splunk","description":"'Triages security alerts in Splunk Enterprise Security by classifying severity, investigating notable events,","domain":"cybersecurity","path":"skills/triaging-security-alerts-in-splunk"},{"name":"triaging-security-incident","description":"'Performs initial triage of security incidents to determine severity, scope, and required response actions using","domain":"cybersecurity","path":"skills/triaging-security-incident"},{"name":"triaging-security-incident-with-ir-playbook","description":"Classify and prioritize security incidents using structured IR playbooks to determine severity, assign response","domain":"cybersecurity","path":"skills/triaging-security-incident-with-ir-playbook"},{"name":"triaging-vulnerabilities-with-ssvc-framework","description":"Triage and prioritize vulnerabilities using CISA's Stakeholder-Specific Vulnerability Categorization (SSVC) decision","domain":"cybersecurity","path":"skills/triaging-vulnerabilities-with-ssvc-framework"},{"name":"validating-backup-integrity-for-recovery","description":"Validate backup integrity through cryptographic hash verification, automated restore testing, corruption detection,","domain":"cybersecurity","path":"skills/validating-backup-integrity-for-recovery"}]} \ No newline at end of file diff --git 
a/personas/_shared/anthropic-cybersecurity-skills/mappings/README.md b/personas/_shared/anthropic-cybersecurity-skills/mappings/README.md new file mode 100644 index 0000000..d579c53 --- /dev/null +++ b/personas/_shared/anthropic-cybersecurity-skills/mappings/README.md @@ -0,0 +1,88 @@ +# MITRE ATT&CK Navigator Layer - Anthropic Cybersecurity Skills + +This directory contains a MITRE ATT&CK Navigator layer file that maps the coverage of the Anthropic Cybersecurity Skills repository against the ATT&CK Enterprise matrix. + +## Files + +| File | Description | +|------|-------------| +| `attack-navigator-layer.json` | ATT&CK Navigator layer (v4.5 format, Enterprise ATT&CK v14) | + +## How to View + +1. Open the [MITRE ATT&CK Navigator](https://mitre-attack.github.io/attack-navigator/) +2. Click **Open Existing Layer** +3. Select **Upload from local** and choose `attack-navigator-layer.json` +4. The matrix will display with blue-shaded techniques indicating coverage + +Alternatively, paste the raw JSON URL into the Navigator's "Load from URL" option if this file is hosted publicly. 
+ +## Coverage Statistics + +| Metric | Value | +|--------|-------| +| Total skills scanned | 742 | +| Unique ATT&CK techniques referenced | 218 | +| Parent techniques | 94 | +| Sub-techniques | 124 | +| Tactics with coverage | 14/14 | + +## Coverage by Tactic + +| Tactic | Techniques Covered | +|--------|-------------------| +| Defense Evasion | 36 | +| Credential Access | 33 | +| Persistence | 29 | +| Initial Access | 17 | +| Command and Control | 17 | +| Privilege Escalation | 13 | +| Discovery | 12 | +| Exfiltration | 12 | +| Reconnaissance | 11 | +| Collection | 10 | +| Lateral Movement | 9 | +| Execution | 8 | +| Resource Development | 6 | +| Impact | 5 | + +## Color Scale + +The layer uses a blue gradient to indicate coverage depth: + +- **Light blue** (`#cfe2f3`): 1-2 skills reference this technique +- **Medium blue** (`#6fa8dc`): 3-5 skills reference this technique +- **Dark blue** (`#3d85c6`): 6-10 skills reference this technique +- **Deep blue** (`#1155cc`): 11+ skills reference this technique + +## Top 10 Most Covered Techniques + +| Technique | Name | Skills | +|-----------|------|--------| +| T1059.001 | PowerShell | 26 | +| T1055 | Process Injection | 17 | +| T1053.005 | Scheduled Task | 16 | +| T1566.001 | Spearphishing Attachment | 15 | +| T1558.003 | Kerberoasting | 14 | +| T1547.001 | Registry Run Keys / Startup Folder | 13 | +| T1078 | Valid Accounts | 13 | +| T1003.006 | DCSync | 13 | +| T1071.001 | Web Protocols | 12 | +| T1021.002 | SMB/Windows Admin Shares | 12 | + +## Methodology + +Techniques were extracted by scanning all `SKILL.md` files in the repository for ATT&CK technique ID patterns (`T1XXX` and `T1XXX.XXX`). Each technique's score is proportional to the number of distinct skills that reference it, normalized to a 1-100 scale. 
+ +## Layer Format + +- **Format version**: 4.5 +- **ATT&CK version**: 14 (Enterprise) +- **Navigator version**: 4.9.1 +- **Domain**: enterprise-attack + +## Related Links + +- [MITRE ATT&CK Framework](https://attack.mitre.org/) +- [ATT&CK Navigator](https://mitre-attack.github.io/attack-navigator/) +- [ATT&CK Navigator GitHub](https://github.com/mitre-attack/attack-navigator) diff --git a/personas/_shared/anthropic-cybersecurity-skills/mappings/attack-navigator-layer.json b/personas/_shared/anthropic-cybersecurity-skills/mappings/attack-navigator-layer.json new file mode 100644 index 0000000..0fea4a1 --- /dev/null +++ b/personas/_shared/anthropic-cybersecurity-skills/mappings/attack-navigator-layer.json @@ -0,0 +1,3594 @@ +{ + "name": "Anthropic Cybersecurity Skills - ATT&CK Coverage", + "versions": { + "attack": "14", + "navigator": "4.9.1", + "layer": "4.5" + }, + "domain": "enterprise-attack", + "description": "MITRE ATT&CK technique coverage map for the Anthropic Cybersecurity Skills repository. Each technique is scored by the number of skills that reference it. 
Higher scores (darker colors) indicate more comprehensive coverage across multiple training skills.", + "filters": { + "platforms": [ + "Linux", + "macOS", + "Windows", + "Network", + "PRE", + "Containers", + "Office 365", + "SaaS", + "Google Workspace", + "IaaS", + "Azure AD" + ] + }, + "sorting": 3, + "layout": { + "layout": "side", + "showID": true, + "showName": true, + "showAggregateScores": false, + "countUnscored": false, + "aggregateFunction": "average", + "expandedSubtechniques": "annotated" + }, + "hideDisabled": false, + "techniques": [ + { + "techniqueID": "T1003", + "score": 42, + "comment": "OS Credential Dumping - Referenced in 11 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "11" + }, + { + "name": "skills", + "value": "building-attack-pattern-library-from-cti-reports, building-detection-rules-with-sigma, detecting-container-escape-with-falco-rules, detecting-credential-dumping-techniques, detecting-credential-dumping-with-edr (+6 more)" + } + ] + }, + { + "techniqueID": "T1003.001", + "score": 46, + "comment": "LSASS Memory - Referenced in 12 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "12" + }, + { + "name": "skills", + "value": "building-attack-pattern-library-from-cti-reports, building-detection-rule-with-splunk-spl, building-detection-rules-with-sigma, conducting-full-scope-red-team-engagement, conducting-internal-network-penetration-test (+7 more)" + } + ] + }, + { + "techniqueID": "T1003.002", + "score": 8, + "comment": "SAM - Referenced in 2 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "2" + }, + { + "name": "skills", + "value": "detecting-credential-dumping-with-edr, detecting-t1003-credential-dumping-with-edr" + } + ] + }, + { + "techniqueID": "T1003.003", + "score": 8, + "comment": "NTDS - Referenced in 2 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "2" + }, + { + "name": 
"skills", + "value": "detecting-credential-dumping-with-edr, detecting-t1003-credential-dumping-with-edr" + } + ] + }, + { + "techniqueID": "T1003.004", + "score": 12, + "comment": "LSA Secrets - Referenced in 3 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "3" + }, + { + "name": "skills", + "value": "detecting-credential-dumping-with-edr, detecting-t1003-credential-dumping-with-edr, performing-credential-access-with-lazagne" + } + ] + }, + { + "techniqueID": "T1003.005", + "score": 8, + "comment": "Cached Domain Credentials - Referenced in 2 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "2" + }, + { + "name": "skills", + "value": "detecting-credential-dumping-with-edr, detecting-t1003-credential-dumping-with-edr" + } + ] + }, + { + "techniqueID": "T1003.006", + "score": 50, + "comment": "DCSync - Referenced in 13 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "13" + }, + { + "name": "skills", + "value": "analyzing-windows-event-logs-in-splunk, conducting-domain-persistence-with-dcsync, conducting-full-scope-red-team-engagement, conducting-internal-network-penetration-test, detecting-credential-dumping-with-edr (+8 more)" + } + ] + }, + { + "techniqueID": "T1005", + "score": 8, + "comment": "Data from Local System - Referenced in 2 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "2" + }, + { + "name": "skills", + "value": "conducting-malware-incident-response, detecting-container-escape-with-falco-rules" + } + ] + }, + { + "techniqueID": "T1016", + "score": 12, + "comment": "System Network Configuration Discovery - Referenced in 3 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "3" + }, + { + "name": "skills", + "value": "conducting-full-scope-red-team-engagement, conducting-internal-reconnaissance-with-bloodhound-ce, exploiting-active-directory-with-bloodhound" + } + ] + 
}, + { + "techniqueID": "T1018", + "score": 15, + "comment": "Remote System Discovery - Referenced in 4 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "4" + }, + { + "name": "skills", + "value": "conducting-full-scope-red-team-engagement, conducting-internal-reconnaissance-with-bloodhound-ce, exploiting-active-directory-with-bloodhound, performing-active-directory-bloodhound-analysis" + } + ] + }, + { + "techniqueID": "T1020", + "score": 4, + "comment": "Automated Exfiltration - Referenced in 1 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "1" + }, + { + "name": "skills", + "value": "hunting-for-data-exfiltration-indicators" + } + ] + }, + { + "techniqueID": "T1021", + "score": 38, + "comment": "Remote Services - Referenced in 10 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "10" + }, + { + "name": "skills", + "value": "detecting-lateral-movement-in-network, detecting-lateral-movement-with-splunk, detecting-service-account-abuse, exploiting-constrained-delegation-abuse, implementing-continuous-security-validation-with-bas (+5 more)" + } + ] + }, + { + "techniqueID": "T1021.001", + "score": 31, + "comment": "Remote Desktop Protocol - Referenced in 8 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "8" + }, + { + "name": "skills", + "value": "analyzing-windows-event-logs-in-splunk, building-attack-pattern-library-from-cti-reports, detecting-lateral-movement-with-splunk, executing-red-team-exercise, implementing-mitre-attack-coverage-mapping (+3 more)" + } + ] + }, + { + "techniqueID": "T1021.002", + "score": 46, + "comment": "SMB/Windows Admin Shares - Referenced in 12 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "12" + }, + { + "name": "skills", + "value": "analyzing-windows-event-logs-in-splunk, building-attack-pattern-library-from-cti-reports, 
building-detection-rule-with-splunk-spl, conducting-full-scope-red-team-engagement, conducting-internal-network-penetration-test (+7 more)" + } + ] + }, + { + "techniqueID": "T1021.003", + "score": 12, + "comment": "DCOM - Referenced in 3 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "3" + }, + { + "name": "skills", + "value": "detecting-lateral-movement-with-splunk, performing-lateral-movement-detection, performing-lateral-movement-with-wmiexec" + } + ] + }, + { + "techniqueID": "T1021.004", + "score": 4, + "comment": "SSH - Referenced in 1 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "1" + }, + { + "name": "skills", + "value": "detecting-lateral-movement-with-splunk" + } + ] + }, + { + "techniqueID": "T1021.006", + "score": 12, + "comment": "Windows Remote Management - Referenced in 3 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "3" + }, + { + "name": "skills", + "value": "building-attack-pattern-library-from-cti-reports, detecting-lateral-movement-with-splunk, performing-lateral-movement-detection" + } + ] + }, + { + "techniqueID": "T1027", + "score": 8, + "comment": "Obfuscated Files or Information - Referenced in 2 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "2" + }, + { + "name": "skills", + "value": "analyzing-apt-group-with-mitre-navigator, conducting-full-scope-red-team-engagement" + } + ] + }, + { + "techniqueID": "T1029", + "score": 4, + "comment": "Scheduled Transfer - Referenced in 1 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "1" + }, + { + "name": "skills", + "value": "hunting-for-data-exfiltration-indicators" + } + ] + }, + { + "techniqueID": "T1030", + "score": 4, + "comment": "Data Transfer Size Limits - Referenced in 1 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "1" + }, + { + "name": "skills", + 
"value": "hunting-for-data-exfiltration-indicators" + } + ] + }, + { + "techniqueID": "T1033", + "score": 8, + "comment": "System Owner/User Discovery - Referenced in 2 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "2" + }, + { + "name": "skills", + "value": "conducting-internal-reconnaissance-with-bloodhound-ce, exploiting-active-directory-with-bloodhound" + } + ] + }, + { + "techniqueID": "T1036", + "score": 12, + "comment": "Masquerading - Referenced in 3 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "3" + }, + { + "name": "skills", + "value": "detecting-evasion-techniques-in-endpoint-logs, implementing-mitre-attack-coverage-mapping, implementing-siem-use-cases-for-detection" + } + ] + }, + { + "techniqueID": "T1036.005", + "score": 4, + "comment": "Match Legitimate Name or Location - Referenced in 1 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "1" + }, + { + "name": "skills", + "value": "detecting-process-injection-techniques" + } + ] + }, + { + "techniqueID": "T1040", + "score": 4, + "comment": "Network Sniffing - Referenced in 1 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "1" + }, + { + "name": "skills", + "value": "implementing-continuous-security-validation-with-bas" + } + ] + }, + { + "techniqueID": "T1041", + "score": 35, + "comment": "Exfiltration Over C2 Channel - Referenced in 9 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "9" + }, + { + "name": "skills", + "value": "analyzing-ransomware-network-indicators, building-attack-pattern-library-from-cti-reports, conducting-full-scope-red-team-engagement, conducting-malware-incident-response, executing-red-team-exercise (+4 more)" + } + ] + }, + { + "techniqueID": "T1047", + "score": 19, + "comment": "Windows Management Instrumentation - Referenced in 5 skill(s)", + "enabled": true, + "metadata": [ + { + 
"name": "skill_count", + "value": "5" + }, + { + "name": "skills", + "value": "conducting-full-scope-red-team-engagement, detecting-lateral-movement-with-splunk, performing-lateral-movement-detection, performing-lateral-movement-with-wmiexec, performing-purple-team-exercise" + } + ] + }, + { + "techniqueID": "T1048", + "score": 19, + "comment": "Exfiltration Over Alternative Protocol - Referenced in 5 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "5" + }, + { + "name": "skills", + "value": "building-detection-rule-with-splunk-spl, conducting-full-scope-red-team-engagement, hunting-for-data-exfiltration-indicators, implementing-continuous-security-validation-with-bas, implementing-mitre-attack-coverage-mapping" + } + ] + }, + { + "techniqueID": "T1048.001", + "score": 4, + "comment": "Symmetric Encrypted Non-C2 - Referenced in 1 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "1" + }, + { + "name": "skills", + "value": "hunting-for-data-exfiltration-indicators" + } + ] + }, + { + "techniqueID": "T1048.002", + "score": 4, + "comment": "Asymmetric Encrypted Non-C2 - Referenced in 1 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "1" + }, + { + "name": "skills", + "value": "hunting-for-data-exfiltration-indicators" + } + ] + }, + { + "techniqueID": "T1048.003", + "score": 19, + "comment": "Unencrypted/Obfuscated Non-C2 - Referenced in 5 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "5" + }, + { + "name": "skills", + "value": "conducting-full-scope-red-team-engagement, hunting-for-data-exfiltration-indicators, hunting-for-dns-tunneling-with-zeek, implementing-continuous-security-validation-with-bas, implementing-mitre-attack-coverage-mapping" + } + ] + }, + { + "techniqueID": "T1052", + "score": 4, + "comment": "Exfiltration Over Physical Medium - Referenced in 1 skill(s)", + "enabled": true, + "metadata": [ + { + 
"name": "skill_count", + "value": "1" + }, + { + "name": "skills", + "value": "hunting-for-data-exfiltration-indicators" + } + ] + }, + { + "techniqueID": "T1053", + "score": 23, + "comment": "Scheduled Task/Job - Referenced in 6 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "6" + }, + { + "name": "skills", + "value": "analyzing-apt-group-with-mitre-navigator, analyzing-persistence-mechanisms-in-linux, hunting-for-persistence-mechanisms-in-windows, implementing-mitre-attack-coverage-mapping, implementing-siem-use-cases-for-detection (+1 more)" + } + ] + }, + { + "techniqueID": "T1053.002", + "score": 4, + "comment": "At - Referenced in 1 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "1" + }, + { + "name": "skills", + "value": "hunting-for-scheduled-task-persistence" + } + ] + }, + { + "techniqueID": "T1053.003", + "score": 8, + "comment": "Cron - Referenced in 2 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "2" + }, + { + "name": "skills", + "value": "hunting-for-scheduled-task-persistence, performing-privilege-escalation-on-linux" + } + ] + }, + { + "techniqueID": "T1053.005", + "score": 62, + "comment": "Scheduled Task - Referenced in 16 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "16" + }, + { + "name": "skills", + "value": "analyzing-apt-group-with-mitre-navigator, analyzing-windows-event-logs-in-splunk, building-attack-pattern-library-from-cti-reports, building-detection-rule-with-splunk-spl, conducting-full-scope-red-team-engagement (+11 more)" + } + ] + }, + { + "techniqueID": "T1055", + "score": 65, + "comment": "Process Injection - Referenced in 17 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "17" + }, + { + "name": "skills", + "value": "building-attack-pattern-library-from-cti-reports, building-red-team-c2-infrastructure-with-havoc, 
conducting-full-scope-red-team-engagement, detecting-evasion-techniques-in-endpoint-logs, detecting-process-hollowing-technique (+12 more)" + } + ] + }, + { + "techniqueID": "T1055.001", + "score": 15, + "comment": "DLL Injection - Referenced in 4 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "4" + }, + { + "name": "skills", + "value": "detecting-process-hollowing-technique, detecting-process-injection-techniques, detecting-t1055-process-injection-with-sysmon, hunting-for-process-injection-techniques" + } + ] + }, + { + "techniqueID": "T1055.002", + "score": 8, + "comment": "Portable Executable Injection - Referenced in 2 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "2" + }, + { + "name": "skills", + "value": "detecting-process-injection-techniques, detecting-t1055-process-injection-with-sysmon" + } + ] + }, + { + "techniqueID": "T1055.003", + "score": 12, + "comment": "Thread Execution Hijacking - Referenced in 3 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "3" + }, + { + "name": "skills", + "value": "detecting-process-hollowing-technique, detecting-process-injection-techniques, detecting-t1055-process-injection-with-sysmon" + } + ] + }, + { + "techniqueID": "T1055.004", + "score": 12, + "comment": "APC Injection - Referenced in 3 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "3" + }, + { + "name": "skills", + "value": "detecting-process-hollowing-technique, detecting-process-injection-techniques, detecting-t1055-process-injection-with-sysmon" + } + ] + }, + { + "techniqueID": "T1055.005", + "score": 8, + "comment": "Thread Local Storage - Referenced in 2 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "2" + }, + { + "name": "skills", + "value": "detecting-process-injection-techniques, detecting-t1055-process-injection-with-sysmon" + } + ] + }, + { + "techniqueID": 
"T1055.008", + "score": 4, + "comment": "Ptrace System Calls - Referenced in 1 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "1" + }, + { + "name": "skills", + "value": "detecting-process-injection-techniques" + } + ] + }, + { + "techniqueID": "T1055.009", + "score": 4, + "comment": "Proc Memory - Referenced in 1 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "1" + }, + { + "name": "skills", + "value": "detecting-process-injection-techniques" + } + ] + }, + { + "techniqueID": "T1055.011", + "score": 4, + "comment": "Extra Window Memory Injection - Referenced in 1 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "1" + }, + { + "name": "skills", + "value": "detecting-process-injection-techniques" + } + ] + }, + { + "techniqueID": "T1055.012", + "score": 23, + "comment": "Process Hollowing - Referenced in 6 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "6" + }, + { + "name": "skills", + "value": "conducting-malware-incident-response, detecting-fileless-malware-techniques, detecting-process-hollowing-technique, detecting-process-injection-techniques, detecting-t1055-process-injection-with-sysmon (+1 more)" + } + ] + }, + { + "techniqueID": "T1055.013", + "score": 12, + "comment": "Process Doppelganging - Referenced in 3 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "3" + }, + { + "name": "skills", + "value": "detecting-process-hollowing-technique, detecting-process-injection-techniques, detecting-t1055-process-injection-with-sysmon" + } + ] + }, + { + "techniqueID": "T1055.014", + "score": 4, + "comment": "VDSO Hijacking - Referenced in 1 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "1" + }, + { + "name": "skills", + "value": "detecting-process-injection-techniques" + } + ] + }, + { + "techniqueID": "T1055.015", + "score": 8, + 
"comment": "ListPlanting - Referenced in 2 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "2" + }, + { + "name": "skills", + "value": "detecting-process-injection-techniques, detecting-t1055-process-injection-with-sysmon" + } + ] + }, + { + "techniqueID": "T1059", + "score": 38, + "comment": "Command and Scripting Interpreter - Referenced in 10 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "10" + }, + { + "name": "skills", + "value": "analyzing-apt-group-with-mitre-navigator, analyzing-threat-actor-ttps-with-mitre-attack, analyzing-windows-event-logs-in-splunk, building-incident-timeline-with-timesketch, deobfuscating-powershell-obfuscated-malware (+5 more)" + } + ] + }, + { + "techniqueID": "T1059.001", + "score": 100, + "comment": "PowerShell - Referenced in 26 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "26" + }, + { + "name": "skills", + "value": "analyzing-apt-group-with-mitre-navigator, analyzing-macro-malware-in-office-documents, analyzing-powershell-empire-artifacts, analyzing-security-logs-with-splunk, analyzing-threat-actor-ttps-with-mitre-navigator (+21 more)" + } + ] + }, + { + "techniqueID": "T1059.003", + "score": 12, + "comment": "Windows Command Shell - Referenced in 3 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "3" + }, + { + "name": "skills", + "value": "building-attack-pattern-library-from-cti-reports, detecting-suspicious-powershell-execution, mapping-mitre-attack-techniques" + } + ] + }, + { + "techniqueID": "T1059.005", + "score": 15, + "comment": "Visual Basic - Referenced in 4 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "4" + }, + { + "name": "skills", + "value": "analyzing-macro-malware-in-office-documents, executing-red-team-exercise, hunting-for-lolbins-execution-in-endpoint-logs, mapping-mitre-attack-techniques" + } + ] + }, + { + 
"techniqueID": "T1068", + "score": 31, + "comment": "Exploitation for Privilege Escalation - Referenced in 8 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "8" + }, + { + "name": "skills", + "value": "conducting-full-scope-red-team-engagement, detecting-container-escape-attempts, detecting-privilege-escalation-attempts, detecting-privilege-escalation-in-kubernetes-pods, exploiting-nopac-cve-2021-42278-42287 (+3 more)" + } + ] + }, + { + "techniqueID": "T1069.001", + "score": 4, + "comment": "Local Groups - Referenced in 1 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "1" + }, + { + "name": "skills", + "value": "performing-active-directory-bloodhound-analysis" + } + ] + }, + { + "techniqueID": "T1069.002", + "score": 15, + "comment": "Domain Groups - Referenced in 4 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "4" + }, + { + "name": "skills", + "value": "conducting-internal-reconnaissance-with-bloodhound-ce, exploiting-active-directory-with-bloodhound, performing-active-directory-bloodhound-analysis, performing-kerberoasting-attack" + } + ] + }, + { + "techniqueID": "T1070", + "score": 12, + "comment": "Indicator Removal - Referenced in 3 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "3" + }, + { + "name": "skills", + "value": "detecting-evasion-techniques-in-endpoint-logs, implementing-siem-use-cases-for-detection, implementing-velociraptor-for-ir-collection" + } + ] + }, + { + "techniqueID": "T1070.001", + "score": 12, + "comment": "Clear Windows Event Logs - Referenced in 3 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "3" + }, + { + "name": "skills", + "value": "detecting-evasion-techniques-in-endpoint-logs, implementing-mitre-attack-coverage-mapping, performing-purple-team-exercise" + } + ] + }, + { + "techniqueID": "T1070.004", + "score": 4, + "comment": "File 
Deletion - Referenced in 1 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "1" + }, + { + "name": "skills", + "value": "implementing-threat-modeling-with-mitre-attack" + } + ] + }, + { + "techniqueID": "T1070.006", + "score": 8, + "comment": "Timestomping - Referenced in 2 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "2" + }, + { + "name": "skills", + "value": "detecting-evasion-techniques-in-endpoint-logs, hunting-for-defense-evasion-via-timestomping" + } + ] + }, + { + "techniqueID": "T1071", + "score": 38, + "comment": "Application Layer Protocol - Referenced in 10 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "10" + }, + { + "name": "skills", + "value": "analyzing-apt-group-with-mitre-navigator, analyzing-network-covert-channels-in-malware, analyzing-ransomware-network-indicators, analyzing-threat-actor-ttps-with-mitre-attack, hunting-advanced-persistent-threats (+5 more)" + } + ] + }, + { + "techniqueID": "T1071.001", + "score": 46, + "comment": "Web Protocols - Referenced in 12 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "12" + }, + { + "name": "skills", + "value": "analyzing-apt-group-with-mitre-navigator, building-c2-infrastructure-with-sliver-framework, building-red-team-c2-infrastructure-with-havoc, conducting-malware-incident-response, detecting-process-injection-techniques (+7 more)" + } + ] + }, + { + "techniqueID": "T1071.004", + "score": 27, + "comment": "DNS - Referenced in 7 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "7" + }, + { + "name": "skills", + "value": "building-attack-pattern-library-from-cti-reports, building-c2-infrastructure-with-sliver-framework, hunting-for-beaconing-with-frequency-analysis, hunting-for-command-and-control-beaconing, hunting-for-dns-tunneling-with-zeek (+2 more)" + } + ] + }, + { + "techniqueID": "T1074", + "score": 
12, + "comment": "Data Staged - Referenced in 3 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "3" + }, + { + "name": "skills", + "value": "building-attack-pattern-library-from-cti-reports, executing-red-team-exercise, hunting-for-data-staging-before-exfiltration" + } + ] + }, + { + "techniqueID": "T1074.001", + "score": 4, + "comment": "Local Data Staging - Referenced in 1 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "1" + }, + { + "name": "skills", + "value": "hunting-for-data-staging-before-exfiltration" + } + ] + }, + { + "techniqueID": "T1074.002", + "score": 4, + "comment": "Remote Data Staging - Referenced in 1 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "1" + }, + { + "name": "skills", + "value": "hunting-for-data-staging-before-exfiltration" + } + ] + }, + { + "techniqueID": "T1078", + "score": 50, + "comment": "Valid Accounts - Referenced in 13 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "13" + }, + { + "name": "skills", + "value": "analyzing-apt-group-with-mitre-navigator, analyzing-windows-event-logs-in-splunk, conducting-full-scope-red-team-engagement, conducting-internal-network-penetration-test, detecting-insider-threat-behaviors (+8 more)" + } + ] + }, + { + "techniqueID": "T1078.001", + "score": 4, + "comment": "Default Accounts - Referenced in 1 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "1" + }, + { + "name": "skills", + "value": "detecting-service-account-abuse" + } + ] + }, + { + "techniqueID": "T1078.002", + "score": 23, + "comment": "Domain Accounts - Referenced in 6 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "6" + }, + { + "name": "skills", + "value": "conducting-domain-persistence-with-dcsync, detecting-service-account-abuse, exploiting-active-directory-certificate-services-esc1, 
exploiting-constrained-delegation-abuse, exploiting-nopac-cve-2021-42278-42287 (+1 more)" + } + ] + }, + { + "techniqueID": "T1078.004", + "score": 12, + "comment": "Cloud Accounts - Referenced in 3 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "3" + }, + { + "name": "skills", + "value": "detecting-azure-service-principal-abuse, implementing-mitre-attack-coverage-mapping, implementing-threat-modeling-with-mitre-attack" + } + ] + }, + { + "techniqueID": "T1082", + "score": 4, + "comment": "System Information Discovery - Referenced in 1 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "1" + }, + { + "name": "skills", + "value": "conducting-full-scope-red-team-engagement" + } + ] + }, + { + "techniqueID": "T1087", + "score": 8, + "comment": "Account Discovery - Referenced in 2 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "2" + }, + { + "name": "skills", + "value": "conducting-full-scope-red-team-engagement, implementing-continuous-security-validation-with-bas" + } + ] + }, + { + "techniqueID": "T1087.002", + "score": 23, + "comment": "Domain Account - Referenced in 6 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "6" + }, + { + "name": "skills", + "value": "conducting-internal-reconnaissance-with-bloodhound-ce, exploiting-active-directory-certificate-services-esc1, exploiting-active-directory-with-bloodhound, exploiting-kerberoasting-with-impacket, performing-active-directory-bloodhound-analysis (+1 more)" + } + ] + }, + { + "techniqueID": "T1087.004", + "score": 8, + "comment": "Cloud Account - Referenced in 2 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "2" + }, + { + "name": "skills", + "value": "detecting-azure-service-principal-abuse, implementing-mitre-attack-coverage-mapping" + } + ] + }, + { + "techniqueID": "T1090", + "score": 4, + "comment": "Proxy - Referenced in 
1 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "1" + }, + { + "name": "skills", + "value": "implementing-mitre-attack-coverage-mapping" + } + ] + }, + { + "techniqueID": "T1090.002", + "score": 8, + "comment": "External Proxy - Referenced in 2 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "2" + }, + { + "name": "skills", + "value": "building-c2-infrastructure-with-sliver-framework, building-red-team-c2-infrastructure-with-havoc" + } + ] + }, + { + "techniqueID": "T1090.004", + "score": 4, + "comment": "Referenced in 1 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "1" + }, + { + "name": "skills", + "value": "hunting-for-domain-fronting-c2-traffic" + } + ] + }, + { + "techniqueID": "T1091", + "score": 4, + "comment": "Replication Through Removable Media - Referenced in 1 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "1" + }, + { + "name": "skills", + "value": "performing-physical-intrusion-assessment" + } + ] + }, + { + "techniqueID": "T1095", + "score": 8, + "comment": "Non-Application Layer Protocol - Referenced in 2 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "2" + }, + { + "name": "skills", + "value": "hunting-for-command-and-control-beaconing, hunting-for-unusual-network-connections" + } + ] + }, + { + "techniqueID": "T1098", + "score": 19, + "comment": "Account Manipulation - Referenced in 5 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "5" + }, + { + "name": "skills", + "value": "analyzing-windows-event-logs-in-splunk, conducting-domain-persistence-with-dcsync, hunting-for-t1098-account-manipulation, implementing-mitre-attack-coverage-mapping, performing-active-directory-compromise-investigation" + } + ] + }, + { + "techniqueID": "T1098.001", + "score": 12, + "comment": "Additional Cloud Credentials - Referenced in 3 
skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "3" + }, + { + "name": "skills", + "value": "conducting-cloud-penetration-testing, detecting-azure-service-principal-abuse, implementing-mitre-attack-coverage-mapping" + } + ] + }, + { + "techniqueID": "T1098.002", + "score": 4, + "comment": "Additional Email Delegate Permissions - Referenced in 1 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "1" + }, + { + "name": "skills", + "value": "detecting-email-forwarding-rules-attack" + } + ] + }, + { + "techniqueID": "T1102", + "score": 4, + "comment": "Web Service - Referenced in 1 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "1" + }, + { + "name": "skills", + "value": "hunting-for-living-off-the-cloud-techniques" + } + ] + }, + { + "techniqueID": "T1105", + "score": 23, + "comment": "Ingress Tool Transfer - Referenced in 6 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "6" + }, + { + "name": "skills", + "value": "building-c2-infrastructure-with-sliver-framework, building-red-team-c2-infrastructure-with-havoc, detecting-living-off-the-land-with-lolbas, implementing-mitre-attack-coverage-mapping, implementing-siem-use-cases-for-detection (+1 more)" + } + ] + }, + { + "techniqueID": "T1110", + "score": 15, + "comment": "Brute Force - Referenced in 4 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "4" + }, + { + "name": "skills", + "value": "analyzing-windows-event-logs-in-splunk, conducting-internal-network-penetration-test, implementing-mitre-attack-coverage-mapping, performing-alert-triage-with-elastic-siem" + } + ] + }, + { + "techniqueID": "T1110.001", + "score": 15, + "comment": "Password Guessing - Referenced in 4 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "4" + }, + { + "name": "skills", + "value": 
"analyzing-windows-event-logs-in-splunk, building-detection-rule-with-splunk-spl, implementing-siem-use-cases-for-detection, performing-false-positive-reduction-in-siem" + } + ] + }, + { + "techniqueID": "T1110.002", + "score": 4, + "comment": "Password Cracking - Referenced in 1 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "1" + }, + { + "name": "skills", + "value": "exploiting-kerberoasting-with-impacket" + } + ] + }, + { + "techniqueID": "T1110.003", + "score": 4, + "comment": "Password Spraying - Referenced in 1 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "1" + }, + { + "name": "skills", + "value": "implementing-siem-use-cases-for-detection" + } + ] + }, + { + "techniqueID": "T1112", + "score": 4, + "comment": "Modify Registry - Referenced in 1 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "1" + }, + { + "name": "skills", + "value": "detecting-fileless-malware-techniques" + } + ] + }, + { + "techniqueID": "T1114.002", + "score": 4, + "comment": "Remote Email Collection - Referenced in 1 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "1" + }, + { + "name": "skills", + "value": "detecting-email-forwarding-rules-attack" + } + ] + }, + { + "techniqueID": "T1114.003", + "score": 8, + "comment": "Email Forwarding Rule - Referenced in 2 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "2" + }, + { + "name": "skills", + "value": "detecting-business-email-compromise, detecting-email-forwarding-rules-attack" + } + ] + }, + { + "techniqueID": "T1127", + "score": 8, + "comment": "Trusted Developer Utilities Proxy Execution - Referenced in 2 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "2" + }, + { + "name": "skills", + "value": "detecting-evasion-techniques-in-endpoint-logs, detecting-living-off-the-land-with-lolbas" + } + ] + }, + { + 
"techniqueID": "T1127.001", + "score": 4, + "comment": "MSBuild - Referenced in 1 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "1" + }, + { + "name": "skills", + "value": "hunting-for-lolbins-execution-in-endpoint-logs" + } + ] + }, + { + "techniqueID": "T1132", + "score": 4, + "comment": "Data Encoding - Referenced in 1 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "1" + }, + { + "name": "skills", + "value": "hunting-for-command-and-control-beaconing" + } + ] + }, + { + "techniqueID": "T1132.001", + "score": 4, + "comment": "Standard Encoding - Referenced in 1 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "1" + }, + { + "name": "skills", + "value": "building-c2-infrastructure-with-sliver-framework" + } + ] + }, + { + "techniqueID": "T1133", + "score": 4, + "comment": "External Remote Services - Referenced in 1 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "1" + }, + { + "name": "skills", + "value": "performing-threat-landscape-assessment-for-sector" + } + ] + }, + { + "techniqueID": "T1134", + "score": 8, + "comment": "Access Token Manipulation - Referenced in 2 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "2" + }, + { + "name": "skills", + "value": "analyzing-windows-event-logs-in-splunk, detecting-privilege-escalation-attempts" + } + ] + }, + { + "techniqueID": "T1134.001", + "score": 4, + "comment": "Token Impersonation/Theft - Referenced in 1 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "1" + }, + { + "name": "skills", + "value": "exploiting-constrained-delegation-abuse" + } + ] + }, + { + "techniqueID": "T1134.005", + "score": 4, + "comment": "SID-History Injection - Referenced in 1 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "1" + }, + { + "name": "skills", + "value": 
"performing-active-directory-compromise-investigation" + } + ] + }, + { + "techniqueID": "T1136", + "score": 8, + "comment": "Create Account - Referenced in 2 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "2" + }, + { + "name": "skills", + "value": "detecting-privilege-escalation-in-kubernetes-pods, implementing-mitre-attack-coverage-mapping" + } + ] + }, + { + "techniqueID": "T1136.001", + "score": 4, + "comment": "Local Account - Referenced in 1 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "1" + }, + { + "name": "skills", + "value": "analyzing-windows-event-logs-in-splunk" + } + ] + }, + { + "techniqueID": "T1136.002", + "score": 4, + "comment": "Domain Account - Referenced in 1 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "1" + }, + { + "name": "skills", + "value": "exploiting-nopac-cve-2021-42278-42287" + } + ] + }, + { + "techniqueID": "T1140", + "score": 12, + "comment": "Deobfuscate/Decode Files or Information - Referenced in 3 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "3" + }, + { + "name": "skills", + "value": "detecting-living-off-the-land-with-lolbas, hunting-for-living-off-the-land-binaries, hunting-for-lolbins-execution-in-endpoint-logs" + } + ] + }, + { + "techniqueID": "T1190", + "score": 15, + "comment": "Exploit Public-Facing Application - Referenced in 4 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "4" + }, + { + "name": "skills", + "value": "conducting-full-scope-red-team-engagement, exploiting-ms17-010-eternalblue-vulnerability, hunting-for-webshell-activity, performing-threat-landscape-assessment-for-sector" + } + ] + }, + { + "techniqueID": "T1195", + "score": 8, + "comment": "Supply Chain Compromise - Referenced in 2 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "2" + }, + { + "name": "skills", + 
"value": "analyzing-supply-chain-malware-artifacts, performing-threat-landscape-assessment-for-sector" + } + ] + }, + { + "techniqueID": "T1195.001", + "score": 4, + "comment": "Compromise Software Dependencies - Referenced in 1 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "1" + }, + { + "name": "skills", + "value": "hunting-for-supply-chain-compromise" + } + ] + }, + { + "techniqueID": "T1195.002", + "score": 4, + "comment": "Compromise Software Supply Chain - Referenced in 1 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "1" + }, + { + "name": "skills", + "value": "hunting-for-supply-chain-compromise" + } + ] + }, + { + "techniqueID": "T1197", + "score": 8, + "comment": "BITS Jobs - Referenced in 2 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "2" + }, + { + "name": "skills", + "value": "hunting-for-living-off-the-land-binaries, hunting-for-lolbins-execution-in-endpoint-logs" + } + ] + }, + { + "techniqueID": "T1199", + "score": 8, + "comment": "Trusted Relationship - Referenced in 2 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "2" + }, + { + "name": "skills", + "value": "hunting-for-supply-chain-compromise, performing-physical-intrusion-assessment" + } + ] + }, + { + "techniqueID": "T1200", + "score": 4, + "comment": "Hardware Additions - Referenced in 1 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "1" + }, + { + "name": "skills", + "value": "performing-physical-intrusion-assessment" + } + ] + }, + { + "techniqueID": "T1204.001", + "score": 4, + "comment": "Malicious Link - Referenced in 1 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "1" + }, + { + "name": "skills", + "value": "conducting-spearphishing-simulation-campaign" + } + ] + }, + { + "techniqueID": "T1204.002", + "score": 23, + "comment": "Malicious File - 
Referenced in 6 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "6" + }, + { + "name": "skills", + "value": "analyzing-macro-malware-in-office-documents, conducting-full-scope-red-team-engagement, conducting-spearphishing-simulation-campaign, implementing-siem-use-cases-for-detection, performing-dynamic-analysis-with-any-run (+1 more)" + } + ] + }, + { + "techniqueID": "T1210", + "score": 8, + "comment": "Exploitation of Remote Services - Referenced in 2 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "2" + }, + { + "name": "skills", + "value": "exploiting-ms17-010-eternalblue-vulnerability, exploiting-zerologon-vulnerability-cve-2020-1472" + } + ] + }, + { + "techniqueID": "T1213", + "score": 4, + "comment": "Data from Information Repositories - Referenced in 1 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "1" + }, + { + "name": "skills", + "value": "conducting-full-scope-red-team-engagement" + } + ] + }, + { + "techniqueID": "T1218", + "score": 23, + "comment": "System Binary Proxy Execution - Referenced in 6 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "6" + }, + { + "name": "skills", + "value": "detecting-evasion-techniques-in-endpoint-logs, detecting-living-off-the-land-with-lolbas, hunting-advanced-persistent-threats, hunting-for-living-off-the-land-binaries, hunting-for-lolbins-execution-in-endpoint-logs (+1 more)" + } + ] + }, + { + "techniqueID": "T1218.001", + "score": 8, + "comment": "Compiled HTML File - Referenced in 2 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "2" + }, + { + "name": "skills", + "value": "hunting-for-living-off-the-land-binaries, hunting-for-lolbins-execution-in-endpoint-logs" + } + ] + }, + { + "techniqueID": "T1218.002", + "score": 4, + "comment": "Referenced in 1 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", 
+ "value": "1" + }, + { + "name": "skills", + "value": "hunting-for-living-off-the-land-binaries" + } + ] + }, + { + "techniqueID": "T1218.003", + "score": 8, + "comment": "CMSTP - Referenced in 2 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "2" + }, + { + "name": "skills", + "value": "hunting-for-living-off-the-land-binaries, hunting-for-lolbins-execution-in-endpoint-logs" + } + ] + }, + { + "techniqueID": "T1218.005", + "score": 12, + "comment": "Mshta - Referenced in 3 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "3" + }, + { + "name": "skills", + "value": "detecting-fileless-malware-techniques, hunting-for-living-off-the-land-binaries, hunting-for-lolbins-execution-in-endpoint-logs" + } + ] + }, + { + "techniqueID": "T1218.010", + "score": 8, + "comment": "Regsvr32 - Referenced in 2 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "2" + }, + { + "name": "skills", + "value": "hunting-for-living-off-the-land-binaries, hunting-for-lolbins-execution-in-endpoint-logs" + } + ] + }, + { + "techniqueID": "T1218.011", + "score": 12, + "comment": "Rundll32 - Referenced in 3 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "3" + }, + { + "name": "skills", + "value": "hunting-for-living-off-the-land-binaries, hunting-for-lolbins-execution-in-endpoint-logs, performing-dynamic-analysis-with-any-run" + } + ] + }, + { + "techniqueID": "T1222.001", + "score": 4, + "comment": "Windows - Referenced in 1 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "1" + }, + { + "name": "skills", + "value": "conducting-domain-persistence-with-dcsync" + } + ] + }, + { + "techniqueID": "T1482", + "score": 12, + "comment": "Domain Trust Discovery - Referenced in 3 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "3" + }, + { + "name": "skills", + "value": 
"conducting-internal-reconnaissance-with-bloodhound-ce, exploiting-active-directory-with-bloodhound, performing-active-directory-bloodhound-analysis" + } + ] + }, + { + "techniqueID": "T1484", + "score": 8, + "comment": "Domain Policy Modification - Referenced in 2 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "2" + }, + { + "name": "skills", + "value": "exploiting-active-directory-certificate-services-esc1, performing-active-directory-vulnerability-assessment" + } + ] + }, + { + "techniqueID": "T1484.001", + "score": 4, + "comment": "Group Policy Modification - Referenced in 1 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "1" + }, + { + "name": "skills", + "value": "performing-active-directory-compromise-investigation" + } + ] + }, + { + "techniqueID": "T1485", + "score": 4, + "comment": "Data Destruction - Referenced in 1 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "1" + }, + { + "name": "skills", + "value": "hunting-for-shadow-copy-deletion" + } + ] + }, + { + "techniqueID": "T1486", + "score": 23, + "comment": "Data Encrypted for Impact - Referenced in 6 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "6" + }, + { + "name": "skills", + "value": "conducting-full-scope-red-team-engagement, hunting-for-shadow-copy-deletion, implementing-honeypot-for-ransomware-detection, implementing-mitre-attack-coverage-mapping, performing-purple-team-exercise (+1 more)" + } + ] + }, + { + "techniqueID": "T1489", + "score": 4, + "comment": "Service Stop - Referenced in 1 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "1" + }, + { + "name": "skills", + "value": "conducting-full-scope-red-team-engagement" + } + ] + }, + { + "techniqueID": "T1490", + "score": 12, + "comment": "Inhibit System Recovery - Referenced in 3 skill(s)", + "enabled": true, + "metadata": [ + { + "name": 
"skill_count", + "value": "3" + }, + { + "name": "skills", + "value": "building-soc-playbook-for-ransomware, hunting-for-shadow-copy-deletion, performing-purple-team-exercise" + } + ] + }, + { + "techniqueID": "T1497", + "score": 4, + "comment": "Virtualization/Sandbox Evasion - Referenced in 1 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "1" + }, + { + "name": "skills", + "value": "analyzing-malware-sandbox-evasion-techniques" + } + ] + }, + { + "techniqueID": "T1505.003", + "score": 8, + "comment": "Web Shell - Referenced in 2 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "2" + }, + { + "name": "skills", + "value": "building-attack-pattern-library-from-cti-reports, hunting-for-webshell-activity" + } + ] + }, + { + "techniqueID": "T1528", + "score": 4, + "comment": "Steal Application Access Token - Referenced in 1 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "1" + }, + { + "name": "skills", + "value": "detecting-azure-service-principal-abuse" + } + ] + }, + { + "techniqueID": "T1530", + "score": 12, + "comment": "Data from Cloud Storage Object - Referenced in 3 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "3" + }, + { + "name": "skills", + "value": "detecting-insider-threat-behaviors, implementing-mitre-attack-coverage-mapping, performing-cloud-incident-containment-procedures" + } + ] + }, + { + "techniqueID": "T1534", + "score": 4, + "comment": "Internal Spearphishing - Referenced in 1 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "1" + }, + { + "name": "skills", + "value": "implementing-mitre-attack-coverage-mapping" + } + ] + }, + { + "techniqueID": "T1537", + "score": 19, + "comment": "Transfer Data to Cloud Account - Referenced in 5 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "5" + }, + { + "name": "skills", + "value": 
"hunting-for-data-exfiltration-indicators, hunting-for-living-off-the-cloud-techniques, implementing-mitre-attack-coverage-mapping, implementing-threat-modeling-with-mitre-attack, performing-cloud-incident-containment-procedures" + } + ] + }, + { + "techniqueID": "T1539", + "score": 8, + "comment": "Steal Web Session Cookie - Referenced in 2 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "2" + }, + { + "name": "skills", + "value": "performing-credential-access-with-lazagne, performing-initial-access-with-evilginx3" + } + ] + }, + { + "techniqueID": "T1543", + "score": 8, + "comment": "Create or Modify System Process - Referenced in 2 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "2" + }, + { + "name": "skills", + "value": "analyzing-persistence-mechanisms-in-linux, hunting-for-persistence-mechanisms-in-windows" + } + ] + }, + { + "techniqueID": "T1543.002", + "score": 4, + "comment": "Systemd Service - Referenced in 1 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "1" + }, + { + "name": "skills", + "value": "performing-privilege-escalation-on-linux" + } + ] + }, + { + "techniqueID": "T1543.003", + "score": 12, + "comment": "Windows Service - Referenced in 3 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "3" + }, + { + "name": "skills", + "value": "hunting-for-persistence-mechanisms-in-windows, hunting-for-unusual-service-installations, implementing-mitre-attack-coverage-mapping" + } + ] + }, + { + "techniqueID": "T1546", + "score": 4, + "comment": "Event Triggered Execution - Referenced in 1 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "1" + }, + { + "name": "skills", + "value": "analyzing-persistence-mechanisms-in-linux" + } + ] + }, + { + "techniqueID": "T1546.003", + "score": 19, + "comment": "WMI Event Subscription - Referenced in 5 skill(s)", + "enabled": true, + 
"metadata": [ + { + "name": "skill_count", + "value": "5" + }, + { + "name": "skills", + "value": "analyzing-windows-event-logs-in-splunk, detecting-fileless-malware-techniques, detecting-wmi-persistence, hunting-for-persistence-mechanisms-in-windows, hunting-for-persistence-via-wmi-subscriptions" + } + ] + }, + { + "techniqueID": "T1546.010", + "score": 4, + "comment": "AppInit DLLs - Referenced in 1 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "1" + }, + { + "name": "skills", + "value": "hunting-for-persistence-mechanisms-in-windows" + } + ] + }, + { + "techniqueID": "T1546.012", + "score": 8, + "comment": "IFEO Injection - Referenced in 2 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "2" + }, + { + "name": "skills", + "value": "hunting-for-persistence-mechanisms-in-windows, hunting-for-registry-persistence-mechanisms" + } + ] + }, + { + "techniqueID": "T1546.015", + "score": 8, + "comment": "COM Hijacking - Referenced in 2 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "2" + }, + { + "name": "skills", + "value": "hunting-for-persistence-mechanisms-in-windows, hunting-for-registry-persistence-mechanisms" + } + ] + }, + { + "techniqueID": "T1547", + "score": 23, + "comment": "Boot or Logon Autostart Execution - Referenced in 6 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "6" + }, + { + "name": "skills", + "value": "analyzing-apt-group-with-mitre-navigator, analyzing-malware-persistence-with-autoruns, hunting-advanced-persistent-threats, hunting-for-persistence-mechanisms-in-windows, implementing-siem-use-cases-for-detection (+1 more)" + } + ] + }, + { + "techniqueID": "T1547.001", + "score": 50, + "comment": "Registry Run Keys / Startup Folder - Referenced in 13 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "13" + }, + { + "name": "skills", + "value": 
"analyzing-apt-group-with-mitre-navigator, analyzing-windows-event-logs-in-splunk, building-attack-pattern-library-from-cti-reports, conducting-full-scope-red-team-engagement, hunting-for-persistence-mechanisms-in-windows (+8 more)" + } + ] + }, + { + "techniqueID": "T1547.004", + "score": 8, + "comment": "Winlogon Helper DLL - Referenced in 2 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "2" + }, + { + "name": "skills", + "value": "hunting-for-persistence-mechanisms-in-windows, hunting-for-registry-persistence-mechanisms" + } + ] + }, + { + "techniqueID": "T1547.005", + "score": 4, + "comment": "Security Support Provider - Referenced in 1 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "1" + }, + { + "name": "skills", + "value": "hunting-for-persistence-mechanisms-in-windows" + } + ] + }, + { + "techniqueID": "T1548", + "score": 15, + "comment": "Abuse Elevation Control Mechanism - Referenced in 4 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "4" + }, + { + "name": "skills", + "value": "detecting-container-escape-attempts, detecting-privilege-escalation-in-kubernetes-pods, detecting-t1548-abuse-elevation-control-mechanism, performing-privilege-escalation-assessment" + } + ] + }, + { + "techniqueID": "T1548.001", + "score": 12, + "comment": "Setuid and Setgid - Referenced in 3 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "3" + }, + { + "name": "skills", + "value": "detecting-privilege-escalation-in-kubernetes-pods, detecting-t1548-abuse-elevation-control-mechanism, performing-privilege-escalation-on-linux" + } + ] + }, + { + "techniqueID": "T1548.002", + "score": 12, + "comment": "Bypass User Account Control - Referenced in 3 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "3" + }, + { + "name": "skills", + "value": "conducting-full-scope-red-team-engagement, 
detecting-privilege-escalation-attempts, detecting-t1548-abuse-elevation-control-mechanism" + } + ] + }, + { + "techniqueID": "T1548.003", + "score": 12, + "comment": "Sudo and Sudo Caching - Referenced in 3 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "3" + }, + { + "name": "skills", + "value": "detecting-t1548-abuse-elevation-control-mechanism, performing-privilege-escalation-assessment, performing-privilege-escalation-on-linux" + } + ] + }, + { + "techniqueID": "T1548.004", + "score": 4, + "comment": "Elevated Execution with Prompt - Referenced in 1 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "1" + }, + { + "name": "skills", + "value": "detecting-t1548-abuse-elevation-control-mechanism" + } + ] + }, + { + "techniqueID": "T1550", + "score": 4, + "comment": "Use Alternate Authentication Material - Referenced in 1 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "1" + }, + { + "name": "skills", + "value": "performing-lateral-movement-detection" + } + ] + }, + { + "techniqueID": "T1550.002", + "score": 35, + "comment": "Pass the Hash - Referenced in 9 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "9" + }, + { + "name": "skills", + "value": "analyzing-windows-event-logs-in-splunk, building-attack-pattern-library-from-cti-reports, conducting-full-scope-red-team-engagement, detecting-lateral-movement-in-network, detecting-pass-the-hash-attacks (+4 more)" + } + ] + }, + { + "techniqueID": "T1550.003", + "score": 15, + "comment": "Pass the Ticket - Referenced in 4 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "4" + }, + { + "name": "skills", + "value": "conducting-pass-the-ticket-attack, detecting-pass-the-hash-attacks, detecting-pass-the-ticket-attacks, exploiting-constrained-delegation-abuse" + } + ] + }, + { + "techniqueID": "T1550.004", + "score": 4, + "comment": "Web 
Session Cookie - Referenced in 1 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "1" + }, + { + "name": "skills", + "value": "performing-initial-access-with-evilginx3" + } + ] + }, + { + "techniqueID": "T1552", + "score": 4, + "comment": "Unsecured Credentials - Referenced in 1 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "1" + }, + { + "name": "skills", + "value": "performing-cloud-incident-containment-procedures" + } + ] + }, + { + "techniqueID": "T1552.001", + "score": 4, + "comment": "Credentials In Files - Referenced in 1 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "1" + }, + { + "name": "skills", + "value": "performing-credential-access-with-lazagne" + } + ] + }, + { + "techniqueID": "T1552.002", + "score": 4, + "comment": "Credentials in Registry - Referenced in 1 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "1" + }, + { + "name": "skills", + "value": "performing-credential-access-with-lazagne" + } + ] + }, + { + "techniqueID": "T1552.005", + "score": 4, + "comment": "Cloud Instance Metadata API - Referenced in 1 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "1" + }, + { + "name": "skills", + "value": "conducting-cloud-penetration-testing" + } + ] + }, + { + "techniqueID": "T1555", + "score": 4, + "comment": "Credentials from Password Stores - Referenced in 1 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "1" + }, + { + "name": "skills", + "value": "performing-credential-access-with-lazagne" + } + ] + }, + { + "techniqueID": "T1555.003", + "score": 4, + "comment": "Web Browsers - Referenced in 1 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "1" + }, + { + "name": "skills", + "value": "performing-credential-access-with-lazagne" + } + ] + }, + { + "techniqueID": "T1555.004", + 
"score": 4, + "comment": "Windows Credential Manager - Referenced in 1 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "1" + }, + { + "name": "skills", + "value": "performing-credential-access-with-lazagne" + } + ] + }, + { + "techniqueID": "T1556", + "score": 4, + "comment": "Modify Authentication Process - Referenced in 1 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "1" + }, + { + "name": "skills", + "value": "performing-initial-access-with-evilginx3" + } + ] + }, + { + "techniqueID": "T1557", + "score": 4, + "comment": "Adversary-in-the-Middle - Referenced in 1 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "1" + }, + { + "name": "skills", + "value": "performing-initial-access-with-evilginx3" + } + ] + }, + { + "techniqueID": "T1557.001", + "score": 8, + "comment": "LLMNR/NBT-NS Poisoning - Referenced in 2 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "2" + }, + { + "name": "skills", + "value": "conducting-internal-network-penetration-test, hunting-for-ntlm-relay-attacks" + } + ] + }, + { + "techniqueID": "T1558", + "score": 19, + "comment": "Steal or Forge Kerberos Tickets - Referenced in 5 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "5" + }, + { + "name": "skills", + "value": "analyzing-windows-event-logs-in-splunk, conducting-pass-the-ticket-attack, exploiting-kerberoasting-with-impacket, exploiting-nopac-cve-2021-42278-42287, performing-lateral-movement-detection" + } + ] + }, + { + "techniqueID": "T1558.001", + "score": 27, + "comment": "Golden Ticket - Referenced in 7 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "7" + }, + { + "name": "skills", + "value": "analyzing-windows-event-logs-in-splunk, conducting-domain-persistence-with-dcsync, detecting-golden-ticket-forgery, detecting-kerberoasting-attacks, 
detecting-mimikatz-execution-patterns (+2 more)" + } + ] + }, + { + "techniqueID": "T1558.002", + "score": 4, + "comment": "Silver Ticket - Referenced in 1 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "1" + }, + { + "name": "skills", + "value": "performing-active-directory-compromise-investigation" + } + ] + }, + { + "techniqueID": "T1558.003", + "score": 54, + "comment": "Kerberoasting - Referenced in 14 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "14" + }, + { + "name": "skills", + "value": "analyzing-windows-event-logs-in-splunk, building-attack-pattern-library-from-cti-reports, conducting-full-scope-red-team-engagement, conducting-internal-network-penetration-test, detecting-kerberoasting-attacks (+9 more)" + } + ] + }, + { + "techniqueID": "T1558.004", + "score": 4, + "comment": "AS-REP Roasting - Referenced in 1 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "1" + }, + { + "name": "skills", + "value": "detecting-kerberoasting-attacks" + } + ] + }, + { + "techniqueID": "T1560", + "score": 8, + "comment": "Archive Collected Data - Referenced in 2 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "2" + }, + { + "name": "skills", + "value": "conducting-full-scope-red-team-engagement, hunting-for-data-staging-before-exfiltration" + } + ] + }, + { + "techniqueID": "T1562", + "score": 4, + "comment": "Impair Defenses - Referenced in 1 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "1" + }, + { + "name": "skills", + "value": "detecting-evasion-techniques-in-endpoint-logs" + } + ] + }, + { + "techniqueID": "T1562.001", + "score": 4, + "comment": "Disable or Modify Tools - Referenced in 1 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "1" + }, + { + "name": "skills", + "value": "detecting-suspicious-powershell-execution" + } + ] + }, + { 
+ "techniqueID": "T1566", + "score": 23, + "comment": "Phishing - Referenced in 6 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "6" + }, + { + "name": "skills", + "value": "analyzing-apt-group-with-mitre-navigator, analyzing-threat-actor-ttps-with-mitre-attack, analyzing-threat-landscape-with-misp, building-attack-pattern-library-from-cti-reports, implementing-mitre-attack-coverage-mapping (+1 more)" + } + ] + }, + { + "techniqueID": "T1566.001", + "score": 58, + "comment": "Spearphishing Attachment - Referenced in 15 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "15" + }, + { + "name": "skills", + "value": "analyzing-apt-group-with-mitre-navigator, analyzing-macro-malware-in-office-documents, analyzing-threat-actor-ttps-with-mitre-navigator, building-attack-pattern-library-from-cti-reports, conducting-full-scope-red-team-engagement (+10 more)" + } + ] + }, + { + "techniqueID": "T1566.002", + "score": 23, + "comment": "Spearphishing Link - Referenced in 6 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "6" + }, + { + "name": "skills", + "value": "building-attack-pattern-library-from-cti-reports, conducting-spearphishing-simulation-campaign, hunting-for-spearphishing-indicators, implementing-continuous-security-validation-with-bas, implementing-mitre-attack-coverage-mapping (+1 more)" + } + ] + }, + { + "techniqueID": "T1566.003", + "score": 12, + "comment": "Spearphishing via Service - Referenced in 3 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "3" + }, + { + "name": "skills", + "value": "conducting-spearphishing-simulation-campaign, hunting-for-spearphishing-indicators, implementing-continuous-security-validation-with-bas" + } + ] + }, + { + "techniqueID": "T1566.004", + "score": 4, + "comment": "Spearphishing Voice - Referenced in 1 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", 
+ "value": "1" + }, + { + "name": "skills", + "value": "conducting-social-engineering-pretext-call" + } + ] + }, + { + "techniqueID": "T1567", + "score": 15, + "comment": "Exfiltration Over Web Service - Referenced in 4 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "4" + }, + { + "name": "skills", + "value": "detecting-insider-threat-behaviors, hunting-for-data-exfiltration-indicators, hunting-for-living-off-the-cloud-techniques, implementing-continuous-security-validation-with-bas" + } + ] + }, + { + "techniqueID": "T1567.002", + "score": 4, + "comment": "Exfiltration to Cloud Storage - Referenced in 1 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "1" + }, + { + "name": "skills", + "value": "hunting-for-data-exfiltration-indicators" + } + ] + }, + { + "techniqueID": "T1568", + "score": 8, + "comment": "Dynamic Resolution - Referenced in 2 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "2" + }, + { + "name": "skills", + "value": "hunting-for-command-and-control-beaconing, implementing-mitre-attack-coverage-mapping" + } + ] + }, + { + "techniqueID": "T1568.002", + "score": 4, + "comment": "Domain Generation Algorithms - Referenced in 1 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "1" + }, + { + "name": "skills", + "value": "hunting-for-beaconing-with-frequency-analysis" + } + ] + }, + { + "techniqueID": "T1569.002", + "score": 12, + "comment": "Service Execution - Referenced in 3 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "3" + }, + { + "name": "skills", + "value": "detecting-lateral-movement-in-network, detecting-lateral-movement-with-splunk, exploiting-ms17-010-eternalblue-vulnerability" + } + ] + }, + { + "techniqueID": "T1570", + "score": 12, + "comment": "Lateral Tool Transfer - Referenced in 3 skill(s)", + "enabled": true, + "metadata": [ + { + "name": 
"skill_count", + "value": "3" + }, + { + "name": "skills", + "value": "detecting-lateral-movement-in-network, detecting-lateral-movement-with-splunk, performing-lateral-movement-with-wmiexec" + } + ] + }, + { + "techniqueID": "T1571", + "score": 8, + "comment": "Non-Standard Port - Referenced in 2 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "2" + }, + { + "name": "skills", + "value": "hunting-for-unusual-network-connections, implementing-mitre-attack-coverage-mapping" + } + ] + }, + { + "techniqueID": "T1572", + "score": 15, + "comment": "Protocol Tunneling - Referenced in 4 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "4" + }, + { + "name": "skills", + "value": "building-c2-infrastructure-with-sliver-framework, hunting-for-command-and-control-beaconing, hunting-for-dns-tunneling-with-zeek, implementing-mitre-attack-coverage-mapping" + } + ] + }, + { + "techniqueID": "T1573", + "score": 15, + "comment": "Encrypted Channel - Referenced in 4 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "4" + }, + { + "name": "skills", + "value": "analyzing-ransomware-network-indicators, hunting-for-beaconing-with-frequency-analysis, hunting-for-command-and-control-beaconing, implementing-mitre-attack-coverage-mapping" + } + ] + }, + { + "techniqueID": "T1573.002", + "score": 8, + "comment": "Asymmetric Cryptography - Referenced in 2 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "2" + }, + { + "name": "skills", + "value": "building-c2-infrastructure-with-sliver-framework, building-red-team-c2-infrastructure-with-havoc" + } + ] + }, + { + "techniqueID": "T1574", + "score": 4, + "comment": "Hijack Execution Flow - Referenced in 1 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "1" + }, + { + "name": "skills", + "value": "analyzing-persistence-mechanisms-in-linux" + } + ] + }, + { + 
"techniqueID": "T1574.001", + "score": 8, + "comment": "DLL Search Order Hijacking - Referenced in 2 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "2" + }, + { + "name": "skills", + "value": "detecting-dll-sideloading-attacks, hunting-for-persistence-mechanisms-in-windows" + } + ] + }, + { + "techniqueID": "T1574.002", + "score": 15, + "comment": "DLL Side-Loading - Referenced in 4 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "4" + }, + { + "name": "skills", + "value": "analyzing-windows-event-logs-in-splunk, building-attack-pattern-library-from-cti-reports, detecting-dll-sideloading-attacks, implementing-siem-use-cases-for-detection" + } + ] + }, + { + "techniqueID": "T1574.006", + "score": 8, + "comment": "Dynamic Linker Hijacking - Referenced in 2 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "2" + }, + { + "name": "skills", + "value": "detecting-dll-sideloading-attacks, performing-privilege-escalation-on-linux" + } + ] + }, + { + "techniqueID": "T1574.008", + "score": 4, + "comment": "Path Interception by Search Order Hijacking - Referenced in 1 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "1" + }, + { + "name": "skills", + "value": "detecting-dll-sideloading-attacks" + } + ] + }, + { + "techniqueID": "T1574.009", + "score": 4, + "comment": "Unquoted Service Path - Referenced in 1 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "1" + }, + { + "name": "skills", + "value": "detecting-privilege-escalation-attempts" + } + ] + }, + { + "techniqueID": "T1578", + "score": 4, + "comment": "Modify Cloud Compute Infrastructure - Referenced in 1 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "1" + }, + { + "name": "skills", + "value": "performing-cloud-incident-containment-procedures" + } + ] + }, + { + "techniqueID": "T1580", + "score": 
4, + "comment": "Cloud Infrastructure Discovery - Referenced in 1 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "1" + }, + { + "name": "skills", + "value": "implementing-mitre-attack-coverage-mapping" + } + ] + }, + { + "techniqueID": "T1583.001", + "score": 15, + "comment": "Domains - Referenced in 4 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "4" + }, + { + "name": "skills", + "value": "building-red-team-c2-infrastructure-with-havoc, conducting-full-scope-red-team-engagement, conducting-spearphishing-simulation-campaign, implementing-mitre-attack-coverage-mapping" + } + ] + }, + { + "techniqueID": "T1583.003", + "score": 4, + "comment": "Virtual Private Server - Referenced in 1 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "1" + }, + { + "name": "skills", + "value": "building-red-team-c2-infrastructure-with-havoc" + } + ] + }, + { + "techniqueID": "T1585.002", + "score": 4, + "comment": "Email Accounts - Referenced in 1 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "1" + }, + { + "name": "skills", + "value": "conducting-spearphishing-simulation-campaign" + } + ] + }, + { + "techniqueID": "T1587.001", + "score": 8, + "comment": "Malware - Referenced in 2 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "2" + }, + { + "name": "skills", + "value": "building-red-team-c2-infrastructure-with-havoc, conducting-full-scope-red-team-engagement" + } + ] + }, + { + "techniqueID": "T1589", + "score": 12, + "comment": "Gather Victim Identity Information - Referenced in 3 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "3" + }, + { + "name": "skills", + "value": "conducting-full-scope-red-team-engagement, conducting-social-engineering-pretext-call, performing-open-source-intelligence-gathering" + } + ] + }, + { + "techniqueID": "T1590", + 
"score": 4, + "comment": "Gather Victim Network Information - Referenced in 1 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "1" + }, + { + "name": "skills", + "value": "performing-open-source-intelligence-gathering" + } + ] + }, + { + "techniqueID": "T1591", + "score": 12, + "comment": "Gather Victim Org Information - Referenced in 3 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "3" + }, + { + "name": "skills", + "value": "collecting-open-source-intelligence, conducting-social-engineering-pretext-call, performing-open-source-intelligence-gathering" + } + ] + }, + { + "techniqueID": "T1592", + "score": 4, + "comment": "Gather Victim Host Information - Referenced in 1 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "1" + }, + { + "name": "skills", + "value": "performing-open-source-intelligence-gathering" + } + ] + }, + { + "techniqueID": "T1593", + "score": 8, + "comment": "Search Open Websites/Domains - Referenced in 2 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "2" + }, + { + "name": "skills", + "value": "conducting-full-scope-red-team-engagement, performing-open-source-intelligence-gathering" + } + ] + }, + { + "techniqueID": "T1594", + "score": 4, + "comment": "Search Victim-Owned Websites - Referenced in 1 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "1" + }, + { + "name": "skills", + "value": "performing-open-source-intelligence-gathering" + } + ] + }, + { + "techniqueID": "T1595.001", + "score": 4, + "comment": "Scanning IP Blocks - Referenced in 1 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "1" + }, + { + "name": "skills", + "value": "performing-open-source-intelligence-gathering" + } + ] + }, + { + "techniqueID": "T1595.002", + "score": 4, + "comment": "Vulnerability Scanning - Referenced in 1 skill(s)", + "enabled": 
true, + "metadata": [ + { + "name": "skill_count", + "value": "1" + }, + { + "name": "skills", + "value": "performing-open-source-intelligence-gathering" + } + ] + }, + { + "techniqueID": "T1596", + "score": 4, + "comment": "Search Open Technical Databases - Referenced in 1 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "1" + }, + { + "name": "skills", + "value": "performing-open-source-intelligence-gathering" + } + ] + }, + { + "techniqueID": "T1598", + "score": 4, + "comment": "Phishing for Information - Referenced in 1 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "1" + }, + { + "name": "skills", + "value": "conducting-social-engineering-pretext-call" + } + ] + }, + { + "techniqueID": "T1598.003", + "score": 8, + "comment": "Spearphishing Link/Voice - Referenced in 2 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "2" + }, + { + "name": "skills", + "value": "conducting-social-engineering-pretext-call, conducting-spearphishing-simulation-campaign" + } + ] + }, + { + "techniqueID": "T1608.001", + "score": 4, + "comment": "Upload Malware - Referenced in 1 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "1" + }, + { + "name": "skills", + "value": "conducting-spearphishing-simulation-campaign" + } + ] + }, + { + "techniqueID": "T1608.005", + "score": 4, + "comment": "Link Target - Referenced in 1 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "1" + }, + { + "name": "skills", + "value": "conducting-spearphishing-simulation-campaign" + } + ] + }, + { + "techniqueID": "T1610", + "score": 8, + "comment": "Deploy Container - Referenced in 2 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "2" + }, + { + "name": "skills", + "value": "detecting-container-escape-attempts, detecting-container-escape-with-falco-rules" + } + ] + }, + { + 
"techniqueID": "T1611", + "score": 8, + "comment": "Escape to Host - Referenced in 2 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "2" + }, + { + "name": "skills", + "value": "detecting-container-escape-attempts, detecting-container-escape-with-falco-rules" + } + ] + }, + { + "techniqueID": "T1615", + "score": 12, + "comment": "Group Policy Discovery - Referenced in 3 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "3" + }, + { + "name": "skills", + "value": "conducting-internal-reconnaissance-with-bloodhound-ce, exploiting-active-directory-with-bloodhound, performing-active-directory-bloodhound-analysis" + } + ] + }, + { + "techniqueID": "T1649", + "score": 4, + "comment": "Steal or Forge Authentication Certificates - Referenced in 1 skill(s)", + "enabled": true, + "metadata": [ + { + "name": "skill_count", + "value": "1" + }, + { + "name": "skills", + "value": "exploiting-active-directory-certificate-services-esc1" + } + ] + } + ], + "gradient": { + "colors": [ + "#cfe2f3", + "#6fa8dc", + "#1155cc" + ], + "minValue": 1, + "maxValue": 100 + }, + "legendItems": [ + { + "label": "1-2 skills (Low coverage)", + "color": "#cfe2f3" + }, + { + "label": "3-5 skills (Moderate coverage)", + "color": "#6fa8dc" + }, + { + "label": "6-10 skills (Good coverage)", + "color": "#3d85c6" + }, + { + "label": "11+ skills (Strong coverage)", + "color": "#1155cc" + } + ], + "showTacticRowBackground": true, + "tacticRowBackground": "#205080", + "selectTechniquesAcrossTactics": true, + "selectSubtechniquesWithParent": true, + "selectVisibleTechniques": false, + "metadata": [ + { + "name": "repository", + "value": "Anthropic-Cybersecurity-Skills" + }, + { + "name": "total_techniques", + "value": "218" + }, + { + "name": "total_skills_scanned", + "value": "742" + }, + { + "name": "generated_date", + "value": "2026-03-11" + }, + { + "name": "attack_version", + "value": "14" + }, + { + "name": "description", + 
"value": "Auto-generated from skill SKILL.md files referencing ATT&CK technique IDs" + } + ], + "links": [ + { + "label": "Repository", + "url": "https://github.com/anthropics/cybersecurity-skills" + }, + { + "label": "ATT&CK Navigator", + "url": "https://mitre-attack.github.io/attack-navigator/" + } + ] +} \ No newline at end of file diff --git a/personas/_shared/anthropic-cybersecurity-skills/mappings/mitre-attack/README.md b/personas/_shared/anthropic-cybersecurity-skills/mappings/mitre-attack/README.md new file mode 100644 index 0000000..d0e21e5 --- /dev/null +++ b/personas/_shared/anthropic-cybersecurity-skills/mappings/mitre-attack/README.md @@ -0,0 +1,102 @@ +# MITRE ATT&CK Mapping + +This directory maps the cybersecurity skills in this repository to the [MITRE ATT&CK](https://attack.mitre.org/) framework (Enterprise v15). + +## Overview + +MITRE ATT&CK is a curated knowledge base and model for cyber adversary behavior, reflecting the various phases of an adversary's lifecycle and the platforms they target. This mapping connects our hands-on skills to ATT&CK tactics and techniques, enabling: + +- **Threat-informed defense** -- prioritize skill development based on real adversary behavior +- **Gap analysis** -- identify ATT&CK techniques not yet covered by available skills +- **Purple team exercises** -- pair offensive (red team) and defensive (blue team) skills for each technique +- **Agent-driven discovery** -- AI agents can query skills by ATT&CK ID for automated security workflows + +## Mapping Methodology + +### Tactic Mapping (TA00xx) + +Each of the 14 ATT&CK Enterprise tactics represents a distinct adversary objective. 
Skills are mapped to tactics based on which adversary goal they help achieve (offensive) or defend against (defensive): + +| Tactic | ID | Offensive Skills | Defensive Skills | +|--------|-----|-----------------|------------------| +| Reconnaissance | TA0043 | penetration-testing, red-teaming | threat-intelligence, phishing-defense | +| Resource Development | TA0042 | red-teaming | threat-intelligence | +| Initial Access | TA0001 | web-application-security, penetration-testing | phishing-defense, endpoint-security | +| Execution | TA0002 | penetration-testing, red-teaming | malware-analysis, endpoint-security, soc-operations | +| Persistence | TA0003 | red-teaming, penetration-testing | threat-hunting, digital-forensics, endpoint-security | +| Privilege Escalation | TA0004 | penetration-testing, red-teaming | endpoint-security, identity-access-management | +| Defense Evasion | TA0005 | red-teaming | malware-analysis, endpoint-security, threat-hunting | +| Credential Access | TA0006 | penetration-testing, red-teaming | identity-access-management, soc-operations | +| Discovery | TA0007 | penetration-testing, red-teaming | threat-hunting, network-security | +| Lateral Movement | TA0008 | red-teaming, penetration-testing | network-security, threat-hunting, soc-operations | +| Collection | TA0009 | red-teaming | digital-forensics, threat-hunting | +| Command and Control | TA0011 | red-teaming | threat-intelligence, network-security, soc-operations | +| Exfiltration | TA0010 | red-teaming | threat-hunting, digital-forensics, network-security | +| Impact | TA0040 | red-teaming | ransomware-defense, incident-response | + +### Technique Mapping (T1xxx) + +Skills are mapped to specific techniques based on their content. 
Examples: + +| Technique | ID | Example Skills | +|-----------|-----|---------------| +| Phishing | T1566 | analyzing-phishing-email-headers, analyzing-certificate-transparency-for-phishing | +| Exploit Public-Facing Application | T1190 | web-application-security skills (SQL injection, XSS, SSRF) | +| OS Credential Dumping | T1003 | penetration-testing credential harvesting skills | +| PowerShell | T1059.001 | analyzing-windows-event-logs-in-splunk, malware-analysis skills | +| Remote Services | T1021 | network-security lateral movement skills | +| Data Encrypted for Impact | T1486 | analyzing-ransomware-encryption-mechanisms | +| Command and Scripting Interpreter | T1059 | malware-analysis script deobfuscation skills | +| Scheduled Task/Job | T1053 | analyzing-malware-persistence-with-autoruns | +| Registry Run Keys | T1547.001 | analyzing-windows-registry-for-artifacts | +| DLL Side-Loading | T1574.002 | analyzing-bootkit-and-rootkit-samples | + +### Sub-technique Mapping (T1xxx.xxx) + +Where applicable, skills are mapped to sub-techniques for precision. For example: + +- `T1566.001` (Spearphishing Attachment) -- analyzing-email-headers-for-phishing-investigation +- `T1566.002` (Spearphishing Link) -- analyzing-certificate-transparency-for-phishing +- `T1003.001` (LSASS Memory) -- analyzing-memory-dumps-with-volatility + +## ATT&CK Navigator Integration + +You can visualize our skill coverage using the [ATT&CK Navigator](https://mitre-attack.github.io/attack-navigator/). To generate a Navigator layer: + +1. Use the coverage summary in [`coverage-summary.md`](coverage-summary.md) to identify covered tactics +2. Import the tactic/technique IDs into a Navigator layer JSON +3. 
Color-code by coverage depth (number of skills per technique) + +### Suggested Color Scale + +| Coverage | Color | Meaning | +|----------|-------|---------| +| 0 skills | White | No coverage -- gap | +| 1-2 skills | Light blue | Basic coverage | +| 3-5 skills | Medium blue | Moderate coverage | +| 6+ skills | Dark blue | Strong coverage | + +## Skill Tag Convention + +Skills relevant to ATT&CK carry these tags in their YAML frontmatter: + +- `mitre-attack` -- general ATT&CK relevance (56 skills currently tagged) +- Technique-specific tags like `privilege-escalation`, `lateral-movement`, `persistence` +- Tool-specific tags that map to ATT&CK software entries (e.g., `cobalt-strike`, `mimikatz`) + +## How to Contribute Mappings + +1. **Identify the skill** -- Read the skill's SKILL.md to understand what it teaches +2. **Find the ATT&CK technique** -- Search [attack.mitre.org](https://attack.mitre.org/) for the matching technique +3. **Determine offensive vs. defensive** -- Is the skill about performing or detecting/preventing the technique? +4. **Update the mapping** -- Add the technique ID to the appropriate table in this directory +5. **Update skill tags** -- Add `mitre-attack` and technique-specific tags to the skill's frontmatter +6. 
**Submit a PR** -- Include the ATT&CK technique URL as justification + +## References + +- [MITRE ATT&CK Enterprise Matrix](https://attack.mitre.org/matrices/enterprise/) +- [MITRE ATT&CK Navigator](https://mitre-attack.github.io/attack-navigator/) +- [ATT&CK v15 Release Notes](https://attack.mitre.org/resources/updates/) +- [MITRE ATT&CK for ICS](https://attack.mitre.org/matrices/ics/) -- relevant for ot-ics-security skills +- [MITRE ATT&CK for Mobile](https://attack.mitre.org/matrices/mobile/) -- relevant for mobile-security skills diff --git a/personas/_shared/anthropic-cybersecurity-skills/mappings/mitre-attack/coverage-summary.md b/personas/_shared/anthropic-cybersecurity-skills/mappings/mitre-attack/coverage-summary.md new file mode 100644 index 0000000..6498dd2 --- /dev/null +++ b/personas/_shared/anthropic-cybersecurity-skills/mappings/mitre-attack/coverage-summary.md @@ -0,0 +1,177 @@ +# ATT&CK Coverage Summary + +Coverage analysis of the 753 cybersecurity skills mapped to MITRE ATT&CK Enterprise v15 tactics. 
+ +## Tactic Coverage Matrix + +| ATT&CK Tactic | ID | Relevant Subdomains | Skills Count | +|---------------|-----|---------------------|--------------| +| Reconnaissance | TA0043 | threat-intelligence, penetration-testing, red-teaming | ~48 | +| Resource Development | TA0042 | threat-intelligence, red-teaming | ~30 | +| Initial Access | TA0001 | web-application-security, phishing-defense, api-security | ~45 | +| Execution | TA0002 | malware-analysis, endpoint-security, soc-operations | ~32 | +| Persistence | TA0003 | threat-hunting, digital-forensics, endpoint-security | ~28 | +| Privilege Escalation | TA0004 | penetration-testing, red-teaming, identity-access-management | ~40 | +| Defense Evasion | TA0005 | malware-analysis, endpoint-security, threat-hunting | ~25 | +| Credential Access | TA0006 | identity-access-management, penetration-testing | ~30 | +| Discovery | TA0007 | penetration-testing, threat-hunting, network-security | ~35 | +| Lateral Movement | TA0008 | red-teaming, network-security, soc-operations | ~28 | +| Collection | TA0009 | digital-forensics, threat-hunting | ~22 | +| Command and Control | TA0011 | threat-intelligence, network-security, soc-operations | ~30 | +| Exfiltration | TA0010 | threat-hunting, digital-forensics, network-security | ~20 | +| Impact | TA0040 | ransomware-defense, incident-response, ot-ics-security | ~35 | + +## Subdomain-to-Tactic Heat Map + +Shows which subdomains contribute skills to each ATT&CK tactic. Intensity indicates relevance (H = High, M = Medium, L = Low). 
+ +| Subdomain (skills) | Recon | Res Dev | Init Access | Exec | Persist | Priv Esc | Def Evasion | Cred Access | Disc | Lat Mov | Collect | C2 | Exfil | Impact | +|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---| +| web-application-security (41) | L | - | **H** | M | L | M | L | M | L | - | - | - | - | M | +| threat-intelligence (43) | **H** | **H** | M | L | L | - | L | - | M | - | - | **H** | L | L | +| threat-hunting (35) | L | - | M | M | **H** | M | **H** | M | **H** | M | **H** | M | **H** | M | +| digital-forensics (34) | - | - | L | M | **H** | L | M | L | L | M | **H** | L | M | M | +| malware-analysis (34) | - | L | M | **H** | **H** | M | **H** | L | L | L | M | **H** | L | M | +| identity-access-management (33) | - | - | M | L | M | **H** | L | **H** | L | M | - | - | - | - | +| network-security (33) | M | - | M | L | L | L | L | L | M | **H** | L | **H** | **H** | L | +| soc-operations (33) | L | - | M | **H** | M | M | M | M | M | M | M | M | M | M | +| cloud-security (48) | M | M | **H** | M | M | **H** | M | **H** | **H** | M | M | L | M | M | +| api-security (28) | L | - | **H** | M | L | M | L | **H** | L | - | M | - | M | L | +| ot-ics-security (28) | M | L | M | M | M | L | L | M | **H** | M | **H** | M | L | **H** | +| container-security (26) | L | L | M | **H** | M | **H** | **H** | M | M | L | L | L | M | M | +| incident-response (24) | - | - | M | M | M | M | M | M | L | M | M | M | M | **H** | +| vulnerability-management (24) | M | - | **H** | M | L | M | L | L | **H** | L | - | - | - | M | +| penetration-testing (23) | **H** | M | **H** | **H** | M | **H** | M | **H** | **H** | M | M | M | M | L | +| red-teaming (24) | **H** | **H** | **H** | **H** | **H** | **H** | **H** | **H** | **H** | **H** | **H** | **H** | **H** | **H** | +| devsecops (16) | L | L | M | M | L | M | L | M | L | - | - | - | - | L | +| endpoint-security (16) | - | - | M | **H** | **H** | **H** | **H** | M | M | M | M | M | L | M | +| phishing-defense (16) 
| M | M | **H** | M | - | - | M | **H** | - | - | M | L | L | L | +| cryptography (13) | - | - | L | - | - | - | M | **H** | - | - | M | M | **H** | L | +| zero-trust-architecture (13) | - | - | M | L | L | **H** | L | **H** | L | **H** | L | L | M | - | +| mobile-security (12) | M | L | **H** | M | M | M | M | M | M | L | M | M | M | L | +| compliance-governance (5) | L | L | L | - | - | L | - | L | L | - | - | - | - | L | +| ransomware-defense (5) | - | - | M | M | M | L | M | - | - | - | M | M | L | **H** | + +## Key Technique Coverage + +High-confidence technique-to-skill mappings based on skill content analysis. + +### Initial Access (TA0001) -- 45 skills + +| Technique | ID | Primary Skills | +|-----------|----|---------------| +| Phishing | T1566 | analyzing-phishing-email-headers, analyzing-certificate-transparency-for-phishing, 14 phishing-defense skills | +| Exploit Public-Facing Application | T1190 | 41 web-application-security skills, 28 api-security skills | +| External Remote Services | T1133 | network-security VPN/remote access skills | +| Valid Accounts | T1078 | identity-access-management credential skills | +| Supply Chain Compromise | T1195 | analyzing-supply-chain-malware-artifacts, devsecops dependency scanning | + +### Execution (TA0002) -- 32 skills + +| Technique | ID | Primary Skills | +|-----------|----|---------------| +| Command and Scripting Interpreter | T1059 | malware-analysis script analysis skills | +| Exploitation for Client Execution | T1203 | web-application-security exploit skills | +| User Execution | T1204 | phishing-defense awareness skills | +| Container Administration Command | T1609 | container-security skills | + +### Persistence (TA0003) -- 28 skills + +| Technique | ID | Primary Skills | +|-----------|----|---------------| +| Boot or Logon Autostart Execution | T1547 | analyzing-malware-persistence-with-autoruns, analyzing-windows-registry-for-artifacts | +| Scheduled Task/Job | T1053 | endpoint-security scheduled task 
skills | +| Create Account | T1136 | identity-access-management monitoring skills | +| Implant Internal Image | T1525 | container-security image scanning skills | + +### Privilege Escalation (TA0004) -- 40 skills + +| Technique | ID | Primary Skills | +|-----------|----|---------------| +| Exploitation for Privilege Escalation | T1068 | penetration-testing privilege escalation skills | +| Access Token Manipulation | T1134 | identity-access-management token skills | +| Container Escape | T1611 | container-security escape detection skills | +| Domain Policy Modification | T1484 | identity-access-management AD skills | + +### Defense Evasion (TA0005) -- 25 skills + +| Technique | ID | Primary Skills | +|-----------|----|---------------| +| Obfuscated Files or Information | T1027 | analyzing-packed-malware-with-upx-unpacker, malware deobfuscation skills | +| Masquerading | T1036 | threat-hunting detection skills | +| Rootkit | T1014 | analyzing-bootkit-and-rootkit-samples | +| Indicator Removal | T1070 | digital-forensics anti-forensics skills | + +### Credential Access (TA0006) -- 30 skills + +| Technique | ID | Primary Skills | +|-----------|----|---------------| +| OS Credential Dumping | T1003 | analyzing-memory-dumps-with-volatility, penetration-testing credential skills | +| Brute Force | T1110 | identity-access-management authentication skills | +| Steal Web Session Cookie | T1539 | web-application-security session skills | +| Unsecured Credentials | T1552 | cloud-security secrets management skills | + +### Discovery (TA0007) -- 35 skills + +| Technique | ID | Primary Skills | +|-----------|----|---------------| +| Network Service Discovery | T1046 | network-security scanning skills, penetration-testing recon | +| System Information Discovery | T1082 | threat-hunting system enumeration skills | +| Cloud Infrastructure Discovery | T1580 | cloud-security asset discovery skills | +| Account Discovery | T1087 | identity-access-management enumeration skills | + +### 
Lateral Movement (TA0008) -- 28 skills + +| Technique | ID | Primary Skills | +|-----------|----|---------------| +| Remote Services | T1021 | network-security remote access skills | +| Lateral Tool Transfer | T1570 | threat-hunting lateral movement detection skills | +| Use Alternate Authentication Material | T1550 | identity-access-management pass-the-hash skills | +| Exploitation of Remote Services | T1210 | penetration-testing exploitation skills | + +### Collection (TA0009) -- 22 skills + +| Technique | ID | Primary Skills | +|-----------|----|---------------| +| Data from Local System | T1005 | digital-forensics disk/file analysis skills | +| Data from Network Shared Drive | T1039 | threat-hunting data access monitoring skills | +| Email Collection | T1114 | analyzing-outlook-pst-for-email-forensics | +| Screen Capture | T1113 | malware-analysis behavior analysis skills | + +### Command and Control (TA0011) -- 30 skills + +| Technique | ID | Primary Skills | +|-----------|----|---------------| +| Application Layer Protocol | T1071 | analyzing-command-and-control-communication, network-security C2 detection | +| Encrypted Channel | T1573 | analyzing-network-covert-channels-in-malware | +| Ingress Tool Transfer | T1105 | analyzing-cobalt-strike-beacon-configuration | +| Proxy | T1090 | network-security proxy analysis skills | + +### Exfiltration (TA0010) -- 20 skills + +| Technique | ID | Primary Skills | +|-----------|----|---------------| +| Exfiltration Over C2 Channel | T1041 | analyzing-dns-logs-for-exfiltration | +| Exfiltration Over Alternative Protocol | T1048 | network-security protocol analysis skills | +| Exfiltration Over Web Service | T1567 | cloud-security data loss prevention skills | + +### Impact (TA0040) -- 35 skills + +| Technique | ID | Primary Skills | +|-----------|----|---------------| +| Data Encrypted for Impact | T1486 | analyzing-ransomware-encryption-mechanisms, 5 ransomware-defense skills | +| Service Stop | T1489 | 
incident-response service restoration skills | +| Inhibit System Recovery | T1490 | ransomware-defense recovery skills | +| Manipulation of Control | T0831 | ot-ics-security control system skills | + +## Coverage Gaps + +Areas where additional skills would improve ATT&CK coverage: + +| Gap Area | ATT&CK Techniques | Recommendation | +|----------|-------------------|----------------| +| Firmware attacks | T1542 (Pre-OS Boot) | Add UEFI/firmware analysis skills | +| Audio/video capture | T1123, T1125 | Add surveillance detection skills | +| Cloud-specific lateral movement | T1550.001 (Web Session Cookie in cloud) | Expand cloud-security lateral movement | +| Hardware additions | T1200 | Add physical security assessment skills | +| Traffic signaling | T1205 | Add network covert channel detection skills | diff --git a/personas/_shared/anthropic-cybersecurity-skills/mappings/nist-csf/README.md b/personas/_shared/anthropic-cybersecurity-skills/mappings/nist-csf/README.md new file mode 100644 index 0000000..965abb8 --- /dev/null +++ b/personas/_shared/anthropic-cybersecurity-skills/mappings/nist-csf/README.md @@ -0,0 +1,133 @@ +# NIST Cybersecurity Framework 2.0 Mapping + +This directory maps the cybersecurity skills in this repository to the [NIST Cybersecurity Framework (CSF) 2.0](https://www.nist.gov/cyberframework), published February 2024. + +## Overview + +NIST CSF 2.0 organizes cybersecurity activities into 6 core functions that represent the full lifecycle of managing cybersecurity risk. 
This mapping enables organizations to: + +- **Align skill development** to their CSF implementation tier +- **Identify training gaps** across the CSF functions +- **Build role-based learning paths** using CSF categories +- **Automate compliance mapping** through AI agent queries + +## CSF 2.0 Functions and Skill Alignment + +### Govern (GV) -- Cybersecurity Risk Management Strategy + +Establishing and monitoring the organization's cybersecurity risk management strategy, expectations, and policy. + +| Category | ID | Mapped Subdomains | Skills | +|----------|-----|-------------------|--------| +| Organizational Context | GV.OC | compliance-governance | 5 | +| Risk Management Strategy | GV.RM | compliance-governance, vulnerability-management | 29 | +| Roles, Responsibilities, and Authorities | GV.RR | compliance-governance, identity-access-management | 38 | +| Policy | GV.PO | compliance-governance, zero-trust-architecture | 18 | +| Oversight | GV.OV | compliance-governance, soc-operations | 38 | +| Cybersecurity Supply Chain Risk Management | GV.SC | devsecops, container-security | 42 | + +**Primary subdomains:** compliance-governance (5), identity-access-management (33), devsecops (16) + +### Identify (ID) -- Understanding Organizational Cybersecurity Risk + +Understanding the organization's current cybersecurity risks. + +| Category | ID | Mapped Subdomains | Skills | +|----------|-----|-------------------|--------| +| Asset Management | ID.AM | cloud-security, container-security, network-security | 107 | +| Risk Assessment | ID.RA | vulnerability-management, threat-intelligence | 67 | +| Improvement | ID.IM | soc-operations, compliance-governance | 38 | + +**Primary subdomains:** vulnerability-management (24), threat-intelligence (43), cloud-security (48) + +### Protect (PR) -- Safeguarding Assets + +Using safeguards to prevent or reduce cybersecurity risk. 
+ +| Category | ID | Mapped Subdomains | Skills | +|----------|-----|-------------------|--------| +| Identity Management, Authentication, and Access Control | PR.AA | identity-access-management, zero-trust-architecture | 46 | +| Awareness and Training | PR.AT | phishing-defense, compliance-governance | 21 | +| Data Security | PR.DS | cryptography, cloud-security, api-security | 89 | +| Platform Security | PR.PS | endpoint-security, container-security, devsecops | 58 | +| Technology Infrastructure Resilience | PR.IR | network-security, zero-trust-architecture | 46 | + +**Primary subdomains:** zero-trust-architecture (13), devsecops (16), identity-access-management (33), cryptography (13) + +### Detect (DE) -- Finding and Analyzing Cybersecurity Events + +Finding and analyzing possible cybersecurity compromises and anomalies. + +| Category | ID | Mapped Subdomains | Skills | +|----------|-----|-------------------|--------| +| Continuous Monitoring | DE.CM | soc-operations, threat-hunting, network-security | 101 | +| Adverse Event Analysis | DE.AE | threat-hunting, malware-analysis, soc-operations | 102 | + +**Primary subdomains:** threat-hunting (35), soc-operations (33), malware-analysis (34) + +### Respond (RS) -- Taking Action Regarding Detected Incidents + +Managing and responding to detected cybersecurity incidents. 
+ +| Category | ID | Mapped Subdomains | Skills | +|----------|-----|-------------------|--------| +| Incident Management | RS.MA | incident-response, soc-operations | 57 | +| Incident Analysis | RS.AN | digital-forensics, malware-analysis, threat-intelligence | 111 | +| Incident Response Reporting and Communication | RS.CO | incident-response, compliance-governance | 29 | +| Incident Mitigation | RS.MI | incident-response, endpoint-security, network-security | 73 | + +**Primary subdomains:** incident-response (24), digital-forensics (34), malware-analysis (34) + +### Recover (RC) -- Restoring Capabilities After an Incident + +Restoring assets and operations affected by a cybersecurity incident. + +| Category | ID | Mapped Subdomains | Skills | +|----------|-----|-------------------|--------| +| Incident Recovery Plan Execution | RC.RP | incident-response, ransomware-defense | 29 | +| Incident Recovery Communication | RC.CO | incident-response, compliance-governance | 29 | + +**Primary subdomains:** incident-response (24), ransomware-defense (5) + +## Function Coverage Distribution + +``` +Govern (GV): ████████████░░░░░░░░ ~54 skills (compliance, IAM, devsecops) +Identify (ID): ██████████████████░░ ~115 skills (vuln-mgmt, threat-intel, cloud) +Protect (PR): ████████████████████ ~160 skills (IAM, ZTA, devsecops, crypto) +Detect (DE): ████████████████░░░░ ~102 skills (threat-hunting, SOC, malware) +Respond (RS): ██████████████████░░ ~111 skills (IR, forensics, malware) +Recover (RC): ████░░░░░░░░░░░░░░░░ ~29 skills (IR recovery, ransomware) +``` + +## How to Use This Mapping + +### For Organizations + +1. Determine your target CSF implementation tier (Partial, Risk Informed, Repeatable, Adaptive) +2. Identify your CSF function priorities +3. Use the category tables above to find relevant skill subdomains +4. 
Deploy skills from those subdomains to your team's training plan + +### For AI Agents + +Query skills by CSF function using subdomain filters: + +``` +# Find all Detect (DE) function skills +Filter: subdomain IN (threat-hunting, soc-operations, malware-analysis) + +# Find all Protect (PR) function skills +Filter: subdomain IN (identity-access-management, zero-trust-architecture, devsecops, cryptography) +``` + +### For Security Teams + +Use the alignment table in [`csf-alignment.md`](csf-alignment.md) for a complete subdomain-to-category cross-reference. + +## References + +- [NIST CSF 2.0 (February 2024)](https://www.nist.gov/cyberframework) +- [NIST SP 800-53 Rev. 5 Control Mapping](https://csrc.nist.gov/publications/detail/sp/800-53/rev-5/final) +- [CSF 2.0 Quick Start Guides](https://www.nist.gov/cyberframework/getting-started) +- [CSF 2.0 Reference Tool](https://csrc.nist.gov/Projects/Cybersecurity-Framework/Filters) diff --git a/personas/_shared/anthropic-cybersecurity-skills/mappings/nist-csf/csf-alignment.md b/personas/_shared/anthropic-cybersecurity-skills/mappings/nist-csf/csf-alignment.md new file mode 100644 index 0000000..7b376f9 --- /dev/null +++ b/personas/_shared/anthropic-cybersecurity-skills/mappings/nist-csf/csf-alignment.md @@ -0,0 +1,102 @@ +# NIST CSF 2.0 Alignment Table + +Complete mapping of each skill subdomain to NIST CSF 2.0 functions and categories. 
+ +## Subdomain-to-CSF Alignment + +| Subdomain | Skills | GV | ID | PR | DE | RS | RC | +|-----------|--------|-----|-----|-----|-----|-----|-----| +| | | Govern | Identify | Protect | Detect | Respond | Recover | + +### Detailed Alignment + +| Subdomain (Skills) | Primary CSF Function | CSF Categories | Alignment Rationale | +|---------------------|---------------------|----------------|---------------------| +| api-security (28) | Protect (PR) | PR.DS, PR.PS | API hardening, authentication, input validation | +| cloud-security (48) | Identify (ID), Protect (PR) | ID.AM, PR.DS, PR.PS, PR.IR | Cloud asset management, data protection, infrastructure resilience | +| compliance-governance (5) | Govern (GV) | GV.OC, GV.RM, GV.RR, GV.PO, GV.OV | Risk strategy, policy, organizational oversight | +| container-security (26) | Protect (PR) | PR.PS, GV.SC | Platform security, supply chain risk management | +| cryptography (13) | Protect (PR) | PR.DS | Data confidentiality and integrity at rest and in transit | +| devsecops (16) | Protect (PR), Govern (GV) | PR.PS, GV.SC | Secure development lifecycle, supply chain security | +| digital-forensics (34) | Respond (RS) | RS.AN, RS.MA | Incident analysis, evidence collection and examination | +| endpoint-security (16) | Protect (PR), Detect (DE) | PR.PS, DE.CM, DE.AE | Endpoint hardening, continuous monitoring, threat detection | +| identity-access-management (33) | Protect (PR), Govern (GV) | PR.AA, GV.RR | Identity lifecycle, authentication, authorization, access governance | +| incident-response (24) | Respond (RS), Recover (RC) | RS.MA, RS.AN, RS.MI, RS.CO, RC.RP, RC.CO | Full incident lifecycle from detection through recovery | +| malware-analysis (34) | Detect (DE), Respond (RS) | DE.AE, RS.AN | Adverse event analysis, reverse engineering, threat characterization | +| mobile-security (12) | Protect (PR) | PR.PS, PR.DS | Mobile platform security, application data protection | +| network-security (33) | 
Protect (PR), Detect (DE) | PR.IR, DE.CM | Network infrastructure resilience, traffic monitoring | +| ot-ics-security (28) | Protect (PR), Detect (DE) | PR.PS, PR.IR, DE.CM | Industrial control system protection and monitoring | +| penetration-testing (23) | Identify (ID) | ID.RA | Risk assessment through offensive security testing | +| phishing-defense (16) | Protect (PR), Detect (DE) | PR.AT, DE.CM, DE.AE | Security awareness training, phishing detection | +| ransomware-defense (5) | Respond (RS), Recover (RC) | RS.MI, RC.RP | Ransomware mitigation and recovery planning | +| red-teaming (24) | Identify (ID) | ID.RA, ID.IM | Adversary simulation for risk assessment and program improvement | +| soc-operations (33) | Detect (DE), Respond (RS) | DE.CM, DE.AE, RS.MA | Continuous monitoring, alert triage, incident management | +| threat-hunting (35) | Detect (DE) | DE.CM, DE.AE | Proactive threat detection, hypothesis-driven analysis | +| threat-intelligence (43) | Identify (ID), Detect (DE) | ID.RA, DE.AE | Threat landscape understanding, intelligence-driven detection | +| vulnerability-management (24) | Identify (ID) | ID.RA, GV.RM | Vulnerability identification, risk assessment, remediation prioritization | +| web-application-security (41) | Protect (PR), Identify (ID) | PR.DS, PR.PS, ID.RA | Application security testing and hardening | +| zero-trust-architecture (13) | Protect (PR) | PR.AA, PR.IR | Zero trust access control and network segmentation | + +## CSF Category Coverage Summary + +### Govern (GV) + +| Category | ID | Description | Subdomain Coverage | +|----------|-----|------------|-------------------| +| Organizational Context | GV.OC | Understanding the organizational mission and stakeholder expectations | compliance-governance | +| Risk Management Strategy | GV.RM | Risk management priorities, constraints, and appetite | compliance-governance, vulnerability-management | +| Roles, Responsibilities, and Authorities | GV.RR | Cybersecurity roles and 
authorities are established | compliance-governance, identity-access-management | +| Policy | GV.PO | Organizational cybersecurity policy is established | compliance-governance, zero-trust-architecture | +| Oversight | GV.OV | Results of cybersecurity activities are reviewed | compliance-governance, soc-operations | +| Cybersecurity Supply Chain Risk Management | GV.SC | Supply chain risks are managed | devsecops, container-security | + +### Identify (ID) + +| Category | ID | Description | Subdomain Coverage | +|----------|-----|------------|-------------------| +| Asset Management | ID.AM | Assets that enable the organization are identified and managed | cloud-security, container-security, network-security | +| Risk Assessment | ID.RA | The cybersecurity risk to the organization is understood | vulnerability-management, threat-intelligence, penetration-testing, red-teaming | +| Improvement | ID.IM | Improvements to organizational cybersecurity are identified | soc-operations, red-teaming, compliance-governance | + +### Protect (PR) + +| Category | ID | Description | Subdomain Coverage | +|----------|-----|------------|-------------------| +| Identity Management, Authentication, and Access Control | PR.AA | Access is limited to authorized users, services, and hardware | identity-access-management, zero-trust-architecture | +| Awareness and Training | PR.AT | Personnel are provided cybersecurity awareness and training | phishing-defense, compliance-governance | +| Data Security | PR.DS | Data are managed consistent with the organization's risk strategy | cryptography, cloud-security, api-security | +| Platform Security | PR.PS | Hardware, software, and services are managed consistent with risk strategy | endpoint-security, container-security, devsecops, ot-ics-security | +| Technology Infrastructure Resilience | PR.IR | Security architectures are managed to protect asset confidentiality, integrity, and availability | network-security, zero-trust-architecture, 
ot-ics-security | + +### Detect (DE) + +| Category | ID | Description | Subdomain Coverage | +|----------|-----|------------|-------------------| +| Continuous Monitoring | DE.CM | Assets are monitored to find anomalies and indicators of compromise | soc-operations, threat-hunting, network-security, endpoint-security | +| Adverse Event Analysis | DE.AE | Anomalies and potential adverse events are analyzed | threat-hunting, malware-analysis, soc-operations, threat-intelligence | + +### Respond (RS) + +| Category | ID | Description | Subdomain Coverage | +|----------|-----|------------|-------------------| +| Incident Management | RS.MA | Responses to detected incidents are managed | incident-response, soc-operations | +| Incident Analysis | RS.AN | Investigations are conducted to understand the incident | digital-forensics, malware-analysis, threat-intelligence | +| Incident Response Reporting and Communication | RS.CO | Response activities are coordinated with internal and external stakeholders | incident-response, compliance-governance | +| Incident Mitigation | RS.MI | Activities are performed to prevent expansion and mitigate effects | incident-response, endpoint-security, network-security | + +### Recover (RC) + +| Category | ID | Description | Subdomain Coverage | +|----------|-----|------------|-------------------| +| Incident Recovery Plan Execution | RC.RP | Restoration activities are performed to ensure operational availability | incident-response, ransomware-defense | +| Incident Recovery Communication | RC.CO | Restoration activities are coordinated with internal and external parties | incident-response, compliance-governance | + +## Gap Analysis + +| CSF Category | Current Coverage | Gap | +|-------------|-----------------|-----| +| GV.OC | Low (5 skills) | Need more organizational security context and mission alignment skills | +| GV.PO | Low | Need dedicated policy development and management skills | +| PR.AT | Moderate (16 skills) | Could expand 
security awareness training beyond phishing | +| RC.RP | Low (29 skills) | Need more disaster recovery and business continuity skills | +| RC.CO | Low | Need dedicated incident communication and stakeholder management skills | diff --git a/personas/_shared/anthropic-cybersecurity-skills/mappings/owasp/README.md b/personas/_shared/anthropic-cybersecurity-skills/mappings/owasp/README.md new file mode 100644 index 0000000..78fc8c3 --- /dev/null +++ b/personas/_shared/anthropic-cybersecurity-skills/mappings/owasp/README.md @@ -0,0 +1,177 @@ +# OWASP Top 10 (2025) Mapping + +This directory maps the cybersecurity skills in this repository to the [OWASP Top 10](https://owasp.org/www-project-top-ten/) categories for web application security risks. + +## Overview + +The OWASP Top 10 represents the most critical security risks to web applications. This mapping connects hands-on skills to each risk category, enabling teams to build targeted training programs for secure development and application security testing. + +## OWASP Top 10 2025 Skill Mapping + +### A01:2025 -- Broken Access Control + +Restrictions on what authenticated users are allowed to do are not properly enforced. + +| Relevant Subdomains | Skills | Key Topics | +|---------------------|--------|------------| +| web-application-security | 41 | IDOR, privilege escalation, path traversal, CORS misconfiguration | +| identity-access-management | 33 | RBAC, ABAC, session management, OAuth/OIDC flaws | +| api-security | 28 | Broken object level authorization (BOLA), function level authorization | +| zero-trust-architecture | 13 | Least privilege enforcement, microsegmentation | + +**Example skills:** Implementing RBAC, testing for IDOR vulnerabilities, configuring OAuth 2.0 securely, enforcing API authorization policies. + +### A02:2025 -- Cryptographic Failures + +Failures related to cryptography that lead to exposure of sensitive data. 
+ +| Relevant Subdomains | Skills | Key Topics | +|---------------------|--------|------------| +| cryptography | 13 | TLS configuration, key management, hashing, encryption at rest | +| web-application-security | 41 | HTTPS enforcement, cookie security flags, certificate validation | +| cloud-security | 48 | KMS configuration, secrets management, encryption in transit | +| api-security | 28 | API transport security, token encryption | + +**Example skills:** Configuring TLS 1.3, implementing envelope encryption with KMS, securing JWT tokens, certificate pinning. + +### A03:2025 -- Injection + +User-supplied data is sent to an interpreter as part of a command or query without proper validation. + +| Relevant Subdomains | Skills | Key Topics | +|---------------------|--------|------------| +| web-application-security | 41 | SQL injection, XSS, command injection, LDAP injection | +| api-security | 28 | GraphQL injection, NoSQL injection, header injection | +| devsecops | 16 | SAST/DAST scanning, input validation, parameterized queries | +| penetration-testing | 23 | Injection testing, payload crafting, WAF bypass | + +**Example skills:** Exploiting and remediating SQL injection, testing for stored/reflected XSS, configuring parameterized queries, SAST pipeline integration. + +### A04:2025 -- Insecure Design + +Risks related to design and architectural flaws, calling for more use of threat modeling and secure design patterns. 
+ +| Relevant Subdomains | Skills | Key Topics | +|---------------------|--------|------------| +| devsecops | 16 | Threat modeling, secure SDLC, security requirements | +| zero-trust-architecture | 13 | Zero trust design principles, defense in depth | +| compliance-governance | 5 | Security architecture review, risk assessment frameworks | +| web-application-security | 41 | Business logic flaws, trust boundary definition | + +**Example skills:** Conducting threat modeling with STRIDE, implementing secure design patterns, defining trust boundaries, security architecture review. + +### A05:2025 -- Security Misconfiguration + +Missing or incorrect security hardening across the application stack. + +| Relevant Subdomains | Skills | Key Topics | +|---------------------|--------|------------| +| cloud-security | 48 | Cloud service misconfiguration, IAM policy errors, S3 bucket exposure | +| container-security | 26 | Container hardening, Kubernetes RBAC, pod security policies | +| network-security | 33 | Firewall rules, segmentation errors, default credentials | +| endpoint-security | 16 | OS hardening, unnecessary services, default configurations | + +**Example skills:** Auditing AWS S3 bucket permissions, hardening Kubernetes clusters, configuring security headers, CIS benchmark compliance. + +### A06:2025 -- Vulnerable and Outdated Components + +Using components with known vulnerabilities or that are no longer maintained. 
+ +| Relevant Subdomains | Skills | Key Topics | +|---------------------|--------|------------| +| vulnerability-management | 24 | CVE tracking, vulnerability scanning, patch management | +| devsecops | 16 | SCA scanning, dependency management, SBOM generation | +| container-security | 26 | Image scanning, base image updates, registry security | +| web-application-security | 41 | Third-party library vulnerabilities, framework updates | + +**Example skills:** Running Trivy container scans, implementing SCA in CI/CD, generating and analyzing SBOMs, CVE prioritization with CVSS/EPSS. + +### A07:2025 -- Identification and Authentication Failures + +Weaknesses in authentication and session management. + +| Relevant Subdomains | Skills | Key Topics | +|---------------------|--------|------------| +| identity-access-management | 33 | MFA implementation, password policies, session fixation | +| web-application-security | 41 | Credential stuffing defense, brute force protection | +| api-security | 28 | API key management, OAuth token handling, JWT validation | +| phishing-defense | 16 | Credential phishing prevention, anti-phishing controls | + +**Example skills:** Implementing FIDO2/WebAuthn, configuring adaptive MFA, securing API authentication, detecting credential stuffing attacks. + +### A08:2025 -- Software and Data Integrity Failures + +Failures related to code and infrastructure that do not protect against integrity violations. 
+ +| Relevant Subdomains | Skills | Key Topics | +|---------------------|--------|------------| +| devsecops | 16 | CI/CD pipeline security, code signing, artifact integrity | +| container-security | 26 | Image signing, admission control, supply chain verification | +| cryptography | 13 | Digital signatures, integrity hashing, code signing certificates | +| vulnerability-management | 24 | Supply chain risk, dependency integrity verification | + +**Example skills:** Implementing Sigstore for container signing, securing CI/CD pipelines, verifying software supply chain integrity, content trust enforcement. + +### A09:2025 -- Security Logging and Monitoring Failures + +Insufficient logging, detection, monitoring, and active response. + +| Relevant Subdomains | Skills | Key Topics | +|---------------------|--------|------------| +| soc-operations | 33 | SIEM configuration, log aggregation, alert tuning | +| threat-hunting | 35 | Log analysis, detection engineering, hypothesis-driven hunting | +| incident-response | 24 | Incident detection, log-based investigation, response automation | +| network-security | 33 | Network monitoring, flow analysis, IDS/IPS tuning | + +**Example skills:** Analyzing security logs with Splunk, writing Sigma detection rules, configuring SIEM correlation rules, implementing centralized logging. + +### A10:2025 -- Server-Side Request Forgery (SSRF) + +Fetching a remote resource without validating the user-supplied URL. 
+ +| Relevant Subdomains | Skills | Key Topics | +|---------------------|--------|------------| +| web-application-security | 41 | SSRF exploitation, URL validation, allowlisting | +| cloud-security | 48 | IMDS exploitation, cloud metadata access, VPC endpoint security | +| api-security | 28 | API-to-API SSRF, webhook validation | +| penetration-testing | 23 | SSRF detection and exploitation techniques | + +**Example skills:** Testing for SSRF vulnerabilities, securing cloud metadata endpoints (IMDSv2), implementing URL validation and allowlisting, detecting SSRF in API integrations. + +## Cross-Reference: OWASP to ATT&CK + +| OWASP Category | Related ATT&CK Techniques | +|---------------|--------------------------| +| A01: Broken Access Control | T1078 (Valid Accounts), T1548 (Abuse Elevation Control) | +| A02: Cryptographic Failures | T1557 (Adversary-in-the-Middle), T1040 (Network Sniffing) | +| A03: Injection | T1190 (Exploit Public-Facing App), T1059 (Command and Scripting) | +| A04: Insecure Design | T1195 (Supply Chain Compromise), cross-cutting | +| A05: Security Misconfiguration | T1574 (Hijack Execution Flow), T1190 | +| A06: Vulnerable Components | T1190 (Exploit Public-Facing App), T1195 | +| A07: Authentication Failures | T1110 (Brute Force), T1539 (Steal Web Session Cookie) | +| A08: Integrity Failures | T1195 (Supply Chain Compromise), T1554 (Compromise Client Software) | +| A09: Logging Failures | T1070 (Indicator Removal), T1562 (Impair Defenses) | +| A10: SSRF | T1190 (Exploit Public-Facing App) | + +## Cross-Reference: OWASP to NIST CSF 2.0 + +| OWASP Category | NIST CSF Functions | CSF Categories | +|---------------|-------------------|----------------| +| A01: Broken Access Control | Protect | PR.AA | +| A02: Cryptographic Failures | Protect | PR.DS | +| A03: Injection | Protect, Detect | PR.DS, DE.AE | +| A04: Insecure Design | Govern, Protect | GV.RM, PR.PS | +| A05: Security Misconfiguration | Protect | PR.PS, PR.IR | +| A06: Vulnerable 
Components | Identify, Govern | ID.RA, GV.SC | +| A07: Authentication Failures | Protect | PR.AA | +| A08: Integrity Failures | Protect, Govern | PR.DS, GV.SC | +| A09: Logging Failures | Detect | DE.CM, DE.AE | +| A10: SSRF | Protect, Detect | PR.DS, DE.AE | + +## References + +- [OWASP Top 10 Project](https://owasp.org/www-project-top-ten/) +- [OWASP API Security Top 10](https://owasp.org/API-Security/) -- relevant for api-security subdomain +- [OWASP Mobile Top 10](https://owasp.org/www-project-mobile-top-10/) -- relevant for mobile-security subdomain +- [OWASP Testing Guide](https://owasp.org/www-project-web-security-testing-guide/) +- [OWASP ASVS](https://owasp.org/www-project-application-security-verification-standard/) -- Application Security Verification Standard diff --git a/personas/_shared/community-skills/olla/SKILL.md b/personas/_shared/community-skills/olla/SKILL.md new file mode 100644 index 0000000..00f0aea --- /dev/null +++ b/personas/_shared/community-skills/olla/SKILL.md @@ -0,0 +1,370 @@ +--- +name: olla +description: Configure and manage Olla LLM proxy gateway — load balancing, model routing, API translation (Anthropic<>OpenAI), health checking, and multi-backend orchestration for local LLM inference. Use when setting up Olla proxy, configuring backends (Ollama, vLLM, LM Studio, llama.cpp, SGLang, LiteLLM), debugging routing issues, or connecting Claude Code to local models via Olla. +allowed-tools: Bash(*), Read(*), Write(*), Edit(*) +--- + +# Olla — LLM Proxy Gateway + +Olla is a high-performance LLM proxy gateway written in Go that unifies multiple local LLM backends behind a single endpoint with intelligent routing, load balancing, health checking, and API translation. 
+ +**Repository**: https://github.com/thushan/olla +**Docs**: https://thushan.github.io/olla/ +**Default Port**: 40114 (mnemonic: "4 OLLA") + +## Installation + +```bash +# Script install (Linux/macOS) +bash <(curl -s https://raw.githubusercontent.com/thushan/olla/main/install.sh) + +# Go install +go install github.com/thushan/olla@latest + +# Docker +docker pull ghcr.io/thushan/olla:latest +docker run -t --name olla -p 40114:40114 ghcr.io/thushan/olla:latest + +# Build from source +git clone https://github.com/thushan/olla.git && cd olla && make build-release +# Binary: ./bin/olla +``` + +Verify: `olla --version` | Health: `curl http://localhost:40114/internal/health` + +## Core Concepts + +### Proxy Engines +- **Sherpa** (default): Simple, shared HTTP transport. Good for dev, <100 concurrent users, lower memory. +- **Olla**: Per-endpoint connection pools, advanced circuit breaker. Production, high throughput, streaming-heavy. + +### Load Balancing Strategies +- **priority** (default): Routes to highest-priority healthy endpoint. Best for primary/backup hierarchies. +- **round-robin**: Even distribution across healthy endpoints. Best for homogeneous servers. +- **least-connections**: Routes to endpoint with fewest active connections. Best for variable request durations/streaming. + +Health-aware weighting: healthy=1.0, busy=0.3, warming=0.1, unhealthy/unknown=0. + +### Model Routing +- **strict** (default): Only route to endpoints known to have the model. 404 if not found. +- **optimistic**: Try any healthy endpoint if model not found. Prioritizes availability. +- **discovery**: Refresh model catalog before routing. Adds latency but ensures freshness. + +Fallback behavior: `compatible_only` (reject if not found), `all` (any healthy), `none` (always reject). 
+ +### Model Aliases +Map a virtual model name to different actual names across backends: +```yaml +model_aliases: + my-llama: + - "llama3.1:8b" # Ollama + - llama-3.1-8b-instruct # LM Studio + - Meta-Llama-3.1-8B-Instruct.gguf # llama.cpp +``` + +### API Translation (Anthropic <> OpenAI) +Three-stage: Request translation -> Backend processing -> Response translation. +Supports streaming SSE, tool use, vision. Overhead: 1-5ms (translation), ~0ms (passthrough). + +**Passthrough mode**: When backend natively supports Anthropic (vLLM v0.11.1+, llama.cpp b4847+, LM Studio v0.4.1+, Ollama v0.14.0+), Olla bypasses translation entirely. Header: `X-Olla-Mode: passthrough`. + +### Health Checking & Circuit Breaker +- Continuous monitoring with configurable intervals (default: 5s) +- Exponential backoff on failures (2x, 4x, 8x... capped at 60s) +- Circuit breaker: Closed -> Open (3 failures) -> Half-Open (30s, test traffic) -> Closed +- Auto model discovery on recovery +- States: Healthy, Busy, Warming, Offline, Unhealthy + +## Configuration + +Config search order: `--config` flag > `OLLA_CONFIG_FILE` env > `config/config.local.yaml` > `config/config.yaml` > `config.yaml` > `default.yaml` + +Best practice: Create `config/config.local.yaml` with only overrides. 
+ +### Minimal Config Example +```yaml +server: + host: "localhost" + port: 40114 + +proxy: + engine: "sherpa" + load_balancer: "priority" + +discovery: + type: "static" + static: + endpoints: + - url: "http://localhost:11434" + name: "local-ollama" + type: "ollama" + priority: 100 +``` + +### Full Config Reference + +#### Server +```yaml +server: + host: "localhost" # Bind address + port: 40114 # Listen port + read_timeout: 30s + write_timeout: 0s # MUST be 0s for streaming + shutdown_timeout: 10s + idle_timeout: 0s + request_logging: true + request_limits: + max_body_size: 104857600 # 100MB + max_header_size: 1048576 # 1MB + rate_limits: + global_requests_per_minute: 1000 + per_ip_requests_per_minute: 100 + health_requests_per_minute: 1000 + burst_size: 50 + cleanup_interval: 5m + trust_proxy_headers: false + trusted_proxy_cidrs: ["127.0.0.0/8", "10.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16"] +``` + +#### Proxy +```yaml +proxy: + engine: "sherpa" # sherpa | olla + profile: "auto" # auto | streaming | standard + load_balancer: "priority" # priority | round-robin | least-connections + connection_timeout: 30s + response_timeout: 600s # 10 min default + read_timeout: 120s + stream_buffer_size: 8192 # 8KB (sherpa), 65536 (olla recommended) + profile_filter: + include: [] # glob patterns + exclude: [] +``` + +#### Discovery & Endpoints +```yaml +discovery: + type: "static" + refresh_interval: 30s + static: + endpoints: + - url: "http://localhost:11434" + name: "local-ollama" + type: "ollama" # ollama|lm-studio|vllm|sglang|llamacpp|lemonade|litellm|openai|docker-model-runner + priority: 100 # 0-100, higher = preferred + preserve_path: false + health_check_url: "" # auto-detected per type + model_url: "" # auto-detected per type + check_interval: 5s + check_timeout: 2s + model_discovery: + enabled: true + interval: 5m + timeout: 30s + concurrent_workers: 5 + retry_attempts: 3 + retry_backoff: 1s +``` + +#### Model Registry & Routing +```yaml +model_registry: + type: 
"memory" + enable_unifier: true + routing_strategy: + type: "strict" # strict | optimistic | discovery + options: + fallback_behavior: "compatible_only" # compatible_only | all | none + discovery_timeout: 2s + discovery_refresh_on_miss: false + unification: + enabled: true + stale_threshold: 24h + cleanup_interval: 5m + cache_ttl: 10m + +model_aliases: + alias-name: + - "backend1-model-name" + - "backend2-model-name" +``` + +#### Translators +```yaml +translators: + anthropic: + enabled: true + passthrough_enabled: true + max_message_size: 10485760 # 10MB + inspector: + enabled: false + output_dir: "logs/inspector/anthropic" + session_header: "X-Session-ID" +``` + +#### Logging & Engineering +```yaml +logging: + level: "info" # debug | info | warn | error + format: "json" # json | text + output: "stdout" # stdout | file + +engineering: + show_nerdstats: false # Memory/GC stats on shutdown +``` + +### Environment Variables +Pattern: `OLLA_
SECTION_KEY` (uppercase, underscores) +```bash +OLLA_SERVER_PORT=8080 +OLLA_PROXY_ENGINE=olla +OLLA_LOG_LEVEL=debug +OLLA_CONFIG_FILE=/path/to/config.yaml +``` + +## Claude Code Integration + +Connect Claude Code to local models through Olla's Anthropic API translation: + +```bash +export ANTHROPIC_BASE_URL="http://localhost:40114/olla/anthropic" +export ANTHROPIC_API_KEY="not-really-needed" +export ANTHROPIC_MODEL="openai/gpt-oss-120b" # or your model name +export ANTHROPIC_SMALL_FAST_MODEL="${ANTHROPIC_MODEL}" +export ANTHROPIC_DEFAULT_HAIKU_MODEL="${ANTHROPIC_MODEL}" +export ANTHROPIC_DEFAULT_SONNET_MODEL="${ANTHROPIC_MODEL}" +export ANTHROPIC_DEFAULT_OPUS_MODEL="${ANTHROPIC_MODEL}" +export CLAUDE_CODE_DISABLE_NONESSENTIAL_TRAFFIC=1 +export API_TIMEOUT_MS=3000000 +``` + +**Recommended models for Claude Code**: qwen2.5-coder:32b, deepseek-coder-v2, codellama:34b, llama3.3, qwen3:32b + +### Docker Compose Quick Setup +```yaml +# compose.yaml +services: + ollama: + image: ollama/ollama + container_name: ollama + ports: ["11434:11434"] + volumes: ["ollama_data:/root/.ollama"] + + olla: + image: ghcr.io/thushan/olla:latest + container_name: olla + ports: ["40114:40114"] + volumes: ["./olla.yaml:/app/config/config.local.yaml"] + depends_on: [ollama] + +volumes: + ollama_data: +``` + +For Docker: set `server.host: "0.0.0.0"` to bind all interfaces. 
+ +## API Endpoints + +### System +| Endpoint | Method | Description | +|----------|--------|-------------| +| `/internal/health` | GET | Health verification | +| `/internal/status` | GET | System metrics | +| `/internal/status/endpoints` | GET | Backend availability | +| `/internal/status/models` | GET | Model registry | +| `/internal/stats/models` | GET | Usage by model | +| `/internal/stats/translators` | GET | Translator performance | +| `/internal/process` | GET | Runtime info | + +### Unified Models +| Endpoint | Method | Description | +|----------|--------|-------------| +| `/olla/models` | GET | All models across all providers | + +### Provider-Specific Routes +| Provider | Route Prefix | +|----------|-------------| +| Ollama | `/olla/ollama/*` | +| LM Studio | `/olla/lmstudio/*` | +| OpenAI | `/olla/openai/*` | +| vLLM | `/olla/vllm/*` | +| SGLang | `/olla/sglang/*` | +| LiteLLM | `/olla/litellm/*` | +| llama.cpp | `/olla/llamacpp/*` | +| Lemonade | `/olla/lemonade/*` | + +### Anthropic Translation +| Endpoint | Method | Description | +|----------|--------|-------------| +| `/olla/anthropic/v1/messages` | POST | Chat (streaming supported) | +| `/olla/anthropic/v1/models` | GET | List models | +| `/olla/anthropic/v1/messages/count_tokens` | POST | Token estimation | + +### Response Headers +- `X-Olla-Request-ID` — Unique request ID +- `X-Olla-Endpoint` — Selected backend +- `X-Olla-Model` — Model identifier +- `X-Olla-Backend-Type` — Provider type +- `X-Olla-Response-Time` — Processing duration +- `X-Olla-Routing-Strategy` — Active strategy +- `X-Olla-Routing-Decision` — routed/fallback/rejected +- `X-Olla-Mode` — "passthrough" when native Anthropic format + +## Troubleshooting + +```bash +# Health check +curl http://localhost:40114/internal/health + +# List available models +curl http://localhost:40114/olla/anthropic/v1/models | jq + +# Check backend health +curl http://localhost:40114/internal/status/endpoints | jq + +# Test streaming +curl -N -X POST 
http://localhost:40114/olla/anthropic/v1/messages \ + -H "Content-Type: application/json" \ + -d '{"model":"llama4:latest","max_tokens":50,"messages":[{"role":"user","content":"Count to 5"}],"stream":true}' + +# Translator stats +curl http://localhost:40114/internal/stats/translators | jq + +# Debug logging +OLLA_LOG_LEVEL=debug olla --config config.yaml +``` + +### Common Issues +- **Port conflict**: Set `OLLA_SERVER_PORT` or change config +- **Streaming broken**: Ensure `server.write_timeout: 0s` +- **Docker can't connect**: Set `server.host: "0.0.0.0"` +- **Model not found**: Check routing strategy (try `optimistic`), verify with `/olla/models` +- **Slow responses**: Switch to `olla` engine, increase `stream_buffer_size` + +## Supported Backends + +| Backend | Type Key | Notes | +|---------|----------|-------| +| Ollama | `ollama` | Most common local setup | +| LM Studio | `lm-studio` | GUI-based, model unification | +| vLLM | `vllm` | High-perf, production grade | +| vLLM-MLX | `vllm-mlx` | Apple Silicon via MLX | +| SGLang | `sglang` | RadixAttention, vision | +| llama.cpp | `llamacpp` | GGUF, CPU-first, edge | +| Lemonade SDK | `lemonade` | AMD Ryzen AI | +| LiteLLM | `litellm` | 100+ cloud providers | +| Docker Model Runner | `docker-model-runner` | OCI model distribution | +| OpenAI-compatible | `openai` | Generic fallback | + +## Development + +```bash +git clone https://github.com/thushan/olla.git && cd olla +make deps # Install dependencies +make dev # Build with hot-reload +make test # Run tests +make ready # Pre-commit checks (fmt + lint + test) +make bench # Benchmarks +``` + +Architecture: Hexagonal (ports & adapters) — `internal/core/` (domain), `internal/adapter/` (infra), `internal/app/` (HTTP handlers). 
diff --git a/personas/_shared/community-skills/olla/references/olla-docs-links.md b/personas/_shared/community-skills/olla/references/olla-docs-links.md new file mode 100644 index 0000000..cfbeac6 --- /dev/null +++ b/personas/_shared/community-skills/olla/references/olla-docs-links.md @@ -0,0 +1,87 @@ +# Olla Documentation Reference Links + +## Official Documentation +- Home: https://thushan.github.io/olla/ +- Demo: https://thushan.github.io/olla/demo/ +- FAQ: https://thushan.github.io/olla/faq/ +- Usage: https://thushan.github.io/olla/usage/ +- About: https://thushan.github.io/olla/about/ + +## Getting Started +- Installation: https://thushan.github.io/olla/getting-started/installation/ +- Quickstart: https://thushan.github.io/olla/getting-started/quickstart/ + +## Concepts +- Overview: https://thushan.github.io/olla/concepts/overview/ +- Load Balancing: https://thushan.github.io/olla/concepts/load-balancing/ +- Model Routing: https://thushan.github.io/olla/concepts/model-routing/ +- Model Aliases: https://thushan.github.io/olla/concepts/model-aliases/ +- Model Unification: https://thushan.github.io/olla/concepts/model-unification/ +- Health Checking: https://thushan.github.io/olla/concepts/health-checking/ +- API Translation: https://thushan.github.io/olla/concepts/api-translation/ +- Proxy Engines: https://thushan.github.io/olla/concepts/proxy-engines/ +- Proxy Profiles: https://thushan.github.io/olla/concepts/proxy-profiles/ +- Profile System: https://thushan.github.io/olla/concepts/profile-system/ +- Provider Metrics: https://thushan.github.io/olla/concepts/provider-metrics/ + +## Configuration +- Overview: https://thushan.github.io/olla/configuration/overview/ +- Filters: https://thushan.github.io/olla/configuration/filters/ +- Reference: https://thushan.github.io/olla/configuration/reference/ +- Examples: https://thushan.github.io/olla/configuration/examples/ +- Best Practices - Configuration: 
https://thushan.github.io/olla/configuration/practices/configuration/ +- Best Practices - Security: https://thushan.github.io/olla/configuration/practices/security/ +- Best Practices - Performance: https://thushan.github.io/olla/configuration/practices/performance/ +- Best Practices - Monitoring: https://thushan.github.io/olla/configuration/practices/monitoring/ + +## Integrations +- Overview: https://thushan.github.io/olla/integrations/overview/ + +### Backend +- Ollama: https://thushan.github.io/olla/integrations/backend/ollama/ +- LM Studio: https://thushan.github.io/olla/integrations/backend/lmstudio/ +- vLLM: https://thushan.github.io/olla/integrations/backend/vllm/ +- vLLM-MLX: https://thushan.github.io/olla/integrations/backend/vllm-mlx/ +- SGLang: https://thushan.github.io/olla/integrations/backend/sglang/ +- Lemonade SDK: https://thushan.github.io/olla/integrations/backend/lemonade/ +- LiteLLM: https://thushan.github.io/olla/integrations/backend/litellm/ +- llama.cpp: https://thushan.github.io/olla/integrations/backend/llamacpp/ +- Docker Model Runner: https://thushan.github.io/olla/integrations/backend/docker-model-runner/ + +### Frontend +- OpenWebUI: https://thushan.github.io/olla/integrations/frontend/openwebui/ +- OpenWebUI (OpenAI): https://thushan.github.io/olla/integrations/frontend/openwebui-openai/ +- Claude Code: https://thushan.github.io/olla/integrations/frontend/claude-code/ +- OpenCode: https://thushan.github.io/olla/integrations/frontend/opencode/ +- Crush CLI: https://thushan.github.io/olla/integrations/frontend/crush-cli/ + +### API Translation +- Anthropic: https://thushan.github.io/olla/integrations/api-translation/anthropic/ + +## API Reference +- Overview: https://thushan.github.io/olla/api-reference/overview/ +- System Endpoints: https://thushan.github.io/olla/api-reference/system/ +- Models API: https://thushan.github.io/olla/api-reference/models/ + +## Compare +- Overview: https://thushan.github.io/olla/compare/overview/ +- 
Integration Patterns: https://thushan.github.io/olla/compare/integration-patterns/ +- vs GPUStack: https://thushan.github.io/olla/compare/gpustack/ +- vs LiteLLM: https://thushan.github.io/olla/compare/litellm/ +- vs LocalAI: https://thushan.github.io/olla/compare/localai/ + +## Development +- Overview: https://thushan.github.io/olla/development/overview/ +- Setup: https://thushan.github.io/olla/development/setup/ +- Architecture: https://thushan.github.io/olla/development/architecture/ +- Patterns: https://thushan.github.io/olla/development/patterns/ +- Circuit Breaker: https://thushan.github.io/olla/development/circuit-breaker/ +- Contributing: https://thushan.github.io/olla/development/contributing/ +- Testing: https://thushan.github.io/olla/development/testing/ +- Benchmarking: https://thushan.github.io/olla/development/benchmarking/ +- Anthropic Inspector: https://thushan.github.io/olla/notes/anthropic-inspector/ + +## GitHub +- Repository: https://github.com/thushan/olla +- Releases: https://github.com/thushan/olla/releases +- Issues: https://github.com/thushan/olla/issues diff --git a/personas/_shared/skills/acquiring-disk-image-with-dd-and-dcfldd/LICENSE b/personas/_shared/skills/acquiring-disk-image-with-dd-and-dcfldd/LICENSE new file mode 100644 index 0000000..d885118 --- /dev/null +++ b/personas/_shared/skills/acquiring-disk-image-with-dd-and-dcfldd/LICENSE @@ -0,0 +1,201 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to the Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by the Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding any notices that do not + pertain to any part of the Derivative Works, in at least one + of the 
following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. Please do not remove or change + the license header comment from a contributed file except when + necessary. + + Copyright 2026 mukul975 + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/personas/_shared/skills/acquiring-disk-image-with-dd-and-dcfldd/SKILL.md b/personas/_shared/skills/acquiring-disk-image-with-dd-and-dcfldd/SKILL.md new file mode 100644 index 0000000..d3d1730 --- /dev/null +++ b/personas/_shared/skills/acquiring-disk-image-with-dd-and-dcfldd/SKILL.md @@ -0,0 +1,242 @@ +--- +name: acquiring-disk-image-with-dd-and-dcfldd +description: Create forensically sound bit-for-bit disk images using dd and dcfldd while preserving evidence integrity through + hash verification. 
+domain: cybersecurity +subdomain: digital-forensics +tags: +- forensics +- disk-imaging +- evidence-acquisition +- dd +- dcfldd +- hash-verification +version: '1.0' +author: mahipal +license: Apache-2.0 +nist_csf: +- RS.AN-01 +- RS.AN-03 +- DE.AE-02 +- RS.MA-01 +--- + +# Acquiring Disk Image with dd and dcfldd + +## When to Use +- When you need to create a forensic copy of a suspect drive for investigation +- During incident response when preserving volatile disk evidence before analysis +- When law enforcement or legal proceedings require a verified bit-for-bit copy +- Before performing any destructive analysis on a storage device +- When acquiring images from physical drives, USB devices, or memory cards + +## Prerequisites +- Linux-based forensic workstation (SIFT, Kali, or any Linux distro) +- `dd` (pre-installed on all Linux systems) or `dcfldd` (enhanced forensic version) +- Write-blocker hardware or software write-blocking configured +- Destination drive with sufficient storage (larger than source) +- Root/sudo privileges on the forensic workstation +- SHA-256 or MD5 hashing utilities (`sha256sum`, `md5sum`) + +## Workflow + +### Step 1: Identify the Target Device and Enable Write Protection + +```bash +# List all connected block devices to identify the target +lsblk -o NAME,SIZE,TYPE,MOUNTPOINT,MODEL + +# Verify the device details +fdisk -l /dev/sdb + +# Enable software write-blocking (if no hardware blocker) +blockdev --setro /dev/sdb + +# Verify read-only status +blockdev --getro /dev/sdb +# Output: 1 (means read-only is enabled) + +# Alternatively, use udev rules for persistent write-blocking +echo 'SUBSYSTEM=="block", ATTRS{serial}=="WD-WCAV5H861234", ATTR{ro}="1"' > /etc/udev/rules.d/99-writeblock.rules +udevadm control --reload-rules +``` + +### Step 2: Prepare the Destination and Document the Source + +```bash +# Create case directory structure +mkdir -p /cases/case-2024-001/{images,hashes,logs,notes} + +# Document source drive information +hdparm 
-I /dev/sdb > /cases/case-2024-001/notes/source_drive_info.txt + +# Record the serial number and model +smartctl -i /dev/sdb >> /cases/case-2024-001/notes/source_drive_info.txt + +# Pre-hash the source device +sha256sum /dev/sdb | tee /cases/case-2024-001/hashes/source_hash_before.txt +``` + +### Step 3: Acquire the Image Using dd + +```bash +# Basic dd acquisition with progress and error handling +dd if=/dev/sdb of=/cases/case-2024-001/images/evidence.dd \ + bs=4096 \ + conv=noerror,sync \ + status=progress 2>&1 | tee /cases/case-2024-001/logs/dd_acquisition.log + +# For compressed images to save space +dd if=/dev/sdb bs=4096 conv=noerror,sync status=progress | \ + gzip -c > /cases/case-2024-001/images/evidence.dd.gz + +# Using dd with a specific count for partial acquisition +dd if=/dev/sdb of=/cases/case-2024-001/images/first_1gb.dd \ + bs=1M count=1024 status=progress +``` + +### Step 4: Acquire Using dcfldd (Preferred Forensic Method) + +```bash +# Install dcfldd if not present +apt-get install dcfldd + +# Acquire image with built-in hashing and split output +dcfldd if=/dev/sdb \ + of=/cases/case-2024-001/images/evidence.dd \ + hash=sha256,md5 \ + hashwindow=1G \ + hashlog=/cases/case-2024-001/hashes/acquisition_hashes.txt \ + bs=4096 \ + conv=noerror,sync \ + errlog=/cases/case-2024-001/logs/dcfldd_errors.log + +# Split large images into manageable segments +dcfldd if=/dev/sdb \ + of=/cases/case-2024-001/images/evidence.dd \ + hash=sha256 \ + hashlog=/cases/case-2024-001/hashes/split_hashes.txt \ + bs=4096 \ + split=2G \ + splitformat=aa + +# Acquire with verification pass +dcfldd if=/dev/sdb \ + of=/cases/case-2024-001/images/evidence.dd \ + hash=sha256 \ + hashlog=/cases/case-2024-001/hashes/verification.txt \ + vf=/cases/case-2024-001/images/evidence.dd \ + verifylog=/cases/case-2024-001/logs/verify.log +``` + +### Step 5: Verify Image Integrity + +```bash +# Hash the acquired image +sha256sum /cases/case-2024-001/images/evidence.dd | \ + tee 
/cases/case-2024-001/hashes/image_hash.txt + +# Compare source and image hashes +diff <(sha256sum /dev/sdb | awk '{print $1}') \ + <(sha256sum /cases/case-2024-001/images/evidence.dd | awk '{print $1}') + +# If using split images, verify each segment +sha256sum /cases/case-2024-001/images/evidence.dd.* | \ + tee /cases/case-2024-001/hashes/split_image_hashes.txt + +# Re-hash source to confirm no changes occurred +sha256sum /dev/sdb | tee /cases/case-2024-001/hashes/source_hash_after.txt +diff /cases/case-2024-001/hashes/source_hash_before.txt \ + /cases/case-2024-001/hashes/source_hash_after.txt +``` + +### Step 6: Document the Acquisition Process + +```bash +# Generate acquisition report +cat << 'EOF' > /cases/case-2024-001/notes/acquisition_report.txt +DISK IMAGE ACQUISITION REPORT +============================== +Case Number: 2024-001 +Date/Time: $(date -u +"%Y-%m-%d %H:%M:%S UTC") +Examiner: [Name] + +Source Device: /dev/sdb +Model: [from hdparm output] +Serial: [from hdparm output] +Size: [from fdisk output] + +Acquisition Tool: dcfldd v1.9.1 +Block Size: 4096 +Write Blocker: [Hardware/Software model] + +Image File: evidence.dd +Image Hash (SHA-256): [from hash file] +Source Hash (SHA-256): [from hash file] +Hash Match: YES/NO + +Errors During Acquisition: [from error log] +EOF + +# Compress logs for archival +tar -czf /cases/case-2024-001/acquisition_package.tar.gz \ + /cases/case-2024-001/hashes/ \ + /cases/case-2024-001/logs/ \ + /cases/case-2024-001/notes/ +``` + +## Key Concepts + +| Concept | Description | +|---------|-------------| +| Bit-for-bit copy | Exact replica of source including unallocated space and slack space | +| Write blocker | Hardware or software mechanism preventing writes to evidence media | +| Hash verification | Cryptographic hash comparing source and image to prove integrity | +| Block size (bs) | Transfer chunk size affecting speed; 4096 or 64K typical for forensics | +| conv=noerror,sync | Continue on read errors and pad with zeros 
to maintain offset alignment | +| Chain of custody | Documented trail proving evidence has not been tampered with | +| Split imaging | Breaking large images into smaller files for storage and transport | +| Raw/dd format | Bit-for-bit image format without metadata container overhead | + +## Tools & Systems + +| Tool | Purpose | +|------|---------| +| dd | Standard Unix disk duplication utility for raw imaging | +| dcfldd | DoD Computer Forensics Laboratory enhanced version of dd with hashing | +| dc3dd | Another forensic dd variant from the DoD Cyber Crime Center | +| sha256sum | SHA-256 hash calculation for integrity verification | +| blockdev | Linux command to set block device read-only mode | +| hdparm | Drive identification and parameter reporting | +| smartctl | S.M.A.R.T. data retrieval for drive health and identification | +| lsblk | Block device enumeration and identification | + +## Common Scenarios + +**Scenario 1: Acquiring a Suspect Laptop Hard Drive** +Connect the drive via a Tableau T35u hardware write-blocker, identify as `/dev/sdb`, use dcfldd with SHA-256 hashing, split into 4GB segments for DVD archival, verify hashes match, document in case notes. + +**Scenario 2: Imaging a USB Flash Drive from a Compromised Workstation** +Use software write-blocking with `blockdev --setro`, acquire with dcfldd including MD5 and SHA-256 dual hashing, image is small enough for single file, verify and store on encrypted case drive. + +**Scenario 3: Remote Acquisition Over Network** +Use dd piped through netcat or ssh for remote acquisition: `ssh root@remote "dd if=/dev/sda bs=4096" | dd of=remote_image.dd bs=4096`, hash both ends independently to verify transfer integrity. + +**Scenario 4: Acquiring from a Failing Drive** +Use `ddrescue` first to recover readable sectors, then use dd with `conv=noerror,sync` to fill gaps with zeros, document which sectors were unreadable in the error log. 
+ +## Output Format + +``` +Acquisition Summary: + Source: /dev/sdb (500GB Western Digital WD5000AAKX) + Destination: /cases/case-2024-001/images/evidence.dd + Tool: dcfldd 1.9.1 + Block Size: 4096 bytes + Duration: 2h 15m 32s + Bytes Copied: 500,107,862,016 + Errors: 0 bad sectors + Source SHA-256: a3f2b8c9d4e5f6a7b8c9d0e1f2a3b4c5d6e7f8a9b0c1d2e3f4a5b6c7d8e9f0a1 + Image SHA-256: a3f2b8c9d4e5f6a7b8c9d0e1f2a3b4c5d6e7f8a9b0c1d2e3f4a5b6c7d8e9f0a1 + Verification: PASSED - Hashes match +``` diff --git a/personas/_shared/skills/acquiring-disk-image-with-dd-and-dcfldd/references/api-reference.md b/personas/_shared/skills/acquiring-disk-image-with-dd-and-dcfldd/references/api-reference.md new file mode 100644 index 0000000..a7a24b9 --- /dev/null +++ b/personas/_shared/skills/acquiring-disk-image-with-dd-and-dcfldd/references/api-reference.md @@ -0,0 +1,99 @@ +# API Reference: dd and dcfldd Disk Imaging + +## dd - Standard Unix Disk Duplication + +### Basic Syntax +```bash +dd if= of= [options] +``` + +### Key Options +| Flag | Description | Example | +|------|-------------|---------| +| `if=` | Input file (source device) | `if=/dev/sdb` | +| `of=` | Output file (destination image) | `of=evidence.dd` | +| `bs=` | Block size for read/write | `bs=4096` (forensic standard) | +| `count=` | Number of blocks to copy | `count=1024` | +| `skip=` | Skip N blocks from input start | `skip=2048` | +| `conv=` | Conversion options | `conv=noerror,sync` | +| `status=` | Transfer statistics level | `status=progress` | + +### conv= Values +- `noerror` - Continue on read errors (do not abort) +- `sync` - Pad input blocks with zeros on error (preserves offset alignment) +- `notrunc` - Do not truncate output file + +### Output Format +``` +500107862016 bytes (500 GB, 466 GiB) copied, 8132.45 s, 61.5 MB/s +976773168+0 records in +976773168+0 records out +``` + +## dcfldd - DoD Forensic dd + +### Basic Syntax +```bash +dcfldd if= of= [options] +``` + +### Extended Options +| Flag | Description | 
Example | +|------|-------------|---------| +| `hash=` | Hash algorithm(s) | `hash=sha256,md5` | +| `hashlog=` | File for hash output | `hashlog=hashes.txt` | +| `hashwindow=` | Hash every N bytes | `hashwindow=1G` | +| `hashconv=` | Hash before or after conversion | `hashconv=after` | +| `errlog=` | Error log file | `errlog=errors.log` | +| `split=` | Split output into chunks | `split=2G` | +| `splitformat=` | Suffix format for split files | `splitformat=aa` | +| `vf=` | Verification file | `vf=evidence.dd` | +| `verifylog=` | Verification result log | `verifylog=verify.log` | + +### Output Format +``` +Total (sha256): a3f2b8c9d4e5f6a7b8c9d0e1f2a3b4c5... +1024+0 records in +1024+0 records out +``` + +## sha256sum - Hash Verification + +### Syntax +```bash +sha256sum +sha256sum -c +``` + +### Output Format +``` +a3f2b8c9d4e5f6... /dev/sdb +a3f2b8c9d4e5f6... evidence.dd +``` + +## blockdev - Write Protection + +### Syntax +```bash +blockdev --setro # Set read-only +blockdev --setrw # Set read-write +blockdev --getro # Check: 1=RO, 0=RW +blockdev --getsize64 # Size in bytes +``` + +## lsblk - Block Device Enumeration + +### Syntax +```bash +lsblk -o NAME,SIZE,TYPE,MOUNTPOINT,MODEL,SERIAL,RO +lsblk -J # JSON output +lsblk -p # Full device paths +``` + +## hdparm - Drive Identification + +### Syntax +```bash +hdparm -I # Detailed drive info +hdparm -i # Summary identification +``` diff --git a/personas/_shared/skills/acquiring-disk-image-with-dd-and-dcfldd/scripts/agent.py b/personas/_shared/skills/acquiring-disk-image-with-dd-and-dcfldd/scripts/agent.py new file mode 100644 index 0000000..2b3d5ac --- /dev/null +++ b/personas/_shared/skills/acquiring-disk-image-with-dd-and-dcfldd/scripts/agent.py @@ -0,0 +1,181 @@ +#!/usr/bin/env python3 +"""Forensic disk image acquisition agent using dd and dcfldd with hash verification.""" + +import shlex +import subprocess +import hashlib +import os +import datetime +import json + + +def run_cmd(cmd, capture=True): + """Execute a 
command and return output.""" + if isinstance(cmd, str): + cmd = shlex.split(cmd) + result = subprocess.run(cmd, capture_output=capture, text=True, timeout=120) + return result.stdout.strip(), result.stderr.strip(), result.returncode + + +def list_block_devices(): + """Enumerate connected block devices.""" + stdout, _, rc = run_cmd("lsblk -J -o NAME,SIZE,TYPE,MOUNTPOINT,MODEL,SERIAL,RO") + if rc == 0 and stdout: + return json.loads(stdout) + return {"blockdevices": []} + + +def check_write_protection(device): + """Verify a device is set to read-only mode.""" + stdout, _, rc = run_cmd(f"blockdev --getro {device}") + if rc == 0: + return stdout.strip() == "1" + return False + + +def enable_write_protection(device): + """Enable software write-blocking on the target device.""" + _, _, rc = run_cmd(f"blockdev --setro {device}") + if rc != 0: + print(f"[ERROR] Failed to set {device} read-only. Run as root.") + return False + if check_write_protection(device): + print(f"[OK] Write protection enabled on {device}") + return True + print(f"[ERROR] Write protection verification failed for {device}") + return False + + +def compute_hash(path, algorithm="sha256", block_size=65536): + """Compute the SHA-256 or MD5 hash of a file or device.""" + h = hashlib.new(algorithm) + try: + with open(path, "rb") as f: + while True: + block = f.read(block_size) + if not block: + break + h.update(block) + except PermissionError: + print(f"[ERROR] Permission denied reading {path}. 
Run as root.") + return None + except FileNotFoundError: + print(f"[ERROR] Path not found: {path}") + return None + return h.hexdigest() + + +def acquire_with_dd(source, destination, block_size=4096, log_file=None): + """Acquire a forensic image using dd with error handling.""" + dd_cmd = [ + "dd", f"if={source}", f"of={destination}", + f"bs={block_size}", "conv=noerror,sync", "status=progress" + ] + print(f"[*] Starting dd acquisition: {source} -> {destination}") + print(f"[*] Block size: {block_size}") + start = datetime.datetime.utcnow() + if log_file: + dd_proc = subprocess.run(dd_cmd, capture_output=True, text=True, timeout=120) + combined = (dd_proc.stdout or "") + (dd_proc.stderr or "") + with open(log_file, "w") as lf: + lf.write(combined) + rc = dd_proc.returncode + else: + result = subprocess.run(dd_cmd, text=True, timeout=120) + rc = result.returncode + elapsed = (datetime.datetime.utcnow() - start).total_seconds() + print(f"[*] Acquisition completed in {elapsed:.1f} seconds (rc={rc})") + return rc == 0 + + +def acquire_with_dcfldd(source, destination, hash_alg="sha256", hash_log=None, + error_log=None, block_size=4096, split_size=None): + """Acquire a forensic image using dcfldd with built-in hashing.""" + cmd = [ + "dcfldd", f"if={source}", f"of={destination}", + f"bs={block_size}", "conv=noerror,sync", + f"hash={hash_alg}", "hashwindow=1G", + ] + if hash_log: + cmd.append(f"hashlog={hash_log}") + if error_log: + cmd.append(f"errlog={error_log}") + if split_size: + cmd.extend([f"split={split_size}", "splitformat=aa"]) + print(f"[*] Starting dcfldd acquisition: {source} -> {destination}") + start = datetime.datetime.utcnow() + result = subprocess.run(cmd, text=True, timeout=120) + rc = result.returncode + elapsed = (datetime.datetime.utcnow() - start).total_seconds() + print(f"[*] dcfldd completed in {elapsed:.1f} seconds (rc={rc})") + return rc == 0 + + +def verify_image(source, image_path, algorithm="sha256"): + """Verify image integrity by comparing 
hashes of source and acquired image.""" + print(f"[*] Computing {algorithm} hash of source: {source}") + source_hash = compute_hash(source, algorithm) + print(f" Source hash: {source_hash}") + print(f"[*] Computing {algorithm} hash of image: {image_path}") + image_hash = compute_hash(image_path, algorithm) + print(f" Image hash: {image_hash}") + if source_hash and image_hash: + match = source_hash == image_hash + status = "PASSED" if match else "FAILED" + print(f"[{'OK' if match else 'FAIL'}] Verification: {status}") + return match, source_hash, image_hash + return False, source_hash, image_hash + + +def generate_report(case_dir, source_device, image_path, tool_used, + source_hash, image_hash, verified, elapsed_seconds=0): + """Generate a forensic acquisition report.""" + report = { + "report_type": "Disk Image Acquisition", + "timestamp": datetime.datetime.utcnow().isoformat() + "Z", + "case_directory": case_dir, + "source_device": source_device, + "image_file": image_path, + "acquisition_tool": tool_used, + "block_size": 4096, + "source_hash_sha256": source_hash, + "image_hash_sha256": image_hash, + "hash_verified": verified, + "duration_seconds": elapsed_seconds, + } + report_path = os.path.join(case_dir, "acquisition_report.json") + with open(report_path, "w") as f: + json.dump(report, f, indent=2) + print(f"[*] Report saved to {report_path}") + return report + + +if __name__ == "__main__": + print("=" * 60) + print("Forensic Disk Image Acquisition Agent") + print("Tools: dd / dcfldd with SHA-256 verification") + print("=" * 60) + + # Demo: list block devices + print("\n[*] Enumerating block devices...") + devices = list_block_devices() + for dev in devices.get("blockdevices", []): + name = dev.get("name", "?") + size = dev.get("size", "?") + dtype = dev.get("type", "?") + model = dev.get("model", "N/A") + ro = "RO" if dev.get("ro") else "RW" + print(f" /dev/{name} {size} {dtype} {model} [{ro}]") + + # Demo workflow (dry run) + demo_source = "/dev/sdb" + 
demo_case = "/cases/demo-case/images" + demo_image = os.path.join(demo_case, "evidence.dd") + + print(f"\n[DEMO] Acquisition workflow for {demo_source}:") + print(f" 1. Enable write protection: blockdev --setro {demo_source}") + print(f" 2. Acquire with dcfldd: dcfldd if={demo_source} of={demo_image} " + f"hash=sha256 hashwindow=1G bs=4096 conv=noerror,sync") + print(f" 3. Verify: compare SHA-256 of {demo_source} and {demo_image}") + print(f" 4. Generate acquisition report with chain-of-custody metadata") + print("\n[*] Agent ready. Provide a source device and case directory to begin.") diff --git a/personas/_shared/skills/analyzing-active-directory-acl-abuse/LICENSE b/personas/_shared/skills/analyzing-active-directory-acl-abuse/LICENSE new file mode 100644 index 0000000..d885118 --- /dev/null +++ b/personas/_shared/skills/analyzing-active-directory-acl-abuse/LICENSE @@ -0,0 +1,201 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to the Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by the Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding any notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. Please do not remove or change + the license header comment from a contributed file except when + necessary. 
+ + Copyright 2026 mukul975 + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/personas/_shared/skills/analyzing-active-directory-acl-abuse/SKILL.md b/personas/_shared/skills/analyzing-active-directory-acl-abuse/SKILL.md new file mode 100644 index 0000000..8deab1e --- /dev/null +++ b/personas/_shared/skills/analyzing-active-directory-acl-abuse/SKILL.md @@ -0,0 +1,84 @@ +--- +name: analyzing-active-directory-acl-abuse +description: Detect dangerous ACL misconfigurations in Active Directory using ldap3 to identify GenericAll, WriteDACL, and + WriteOwner abuse paths +domain: cybersecurity +subdomain: identity-security +tags: +- active-directory +- acl-abuse +- ldap +- privilege-escalation +version: '1.0' +author: mahipal +license: Apache-2.0 +nist_csf: +- PR.AA-01 +- PR.AA-05 +- PR.AA-06 +--- + + +# Analyzing Active Directory ACL Abuse + +## Overview + +Active Directory Access Control Lists (ACLs) define permissions on AD objects through Discretionary Access Control Lists (DACLs) containing Access Control Entries (ACEs). Misconfigured ACEs can grant non-privileged users dangerous permissions such as GenericAll (full control), WriteDACL (modify permissions), WriteOwner (take ownership), and GenericWrite (modify attributes) on sensitive objects like Domain Admins groups, domain controllers, or GPOs. 
+ +This skill uses the ldap3 Python library to connect to a Domain Controller, query objects with their nTSecurityDescriptor attribute, parse the binary security descriptor into SDDL (Security Descriptor Definition Language) format, and identify ACEs that grant dangerous permissions to non-administrative principals. These misconfigurations are the basis for ACL-based attack paths discovered by tools like BloodHound. + + +## When to Use + +- When investigating security incidents that require analyzing active directory acl abuse +- When building detection rules or threat hunting queries for this domain +- When SOC analysts need structured procedures for this analysis type +- When validating security monitoring coverage for related attack techniques + +## Prerequisites + +- Python 3.9 or later with ldap3 library (`pip install ldap3`) +- Domain user credentials with read access to AD objects +- Network connectivity to Domain Controller on port 389 (LDAP) or 636 (LDAPS) +- Understanding of Active Directory security model and SDDL format + +## Steps + +1. **Connect to Domain Controller**: Establish an LDAP connection using ldap3 with NTLM or simple authentication. Use LDAPS (port 636) for encrypted connections in production. + +2. **Query target objects**: Search the target OU or entire domain for objects including users, groups, computers, and OUs. Request the `nTSecurityDescriptor`, `distinguishedName`, `objectClass`, and `sAMAccountName` attributes. + +3. **Parse security descriptors**: Convert the binary nTSecurityDescriptor into its SDDL string representation. Parse each ACE in the DACL to extract the trustee SID, access mask, and ACE type (allow/deny). + +4. **Resolve SIDs to principals**: Map security identifiers (SIDs) to human-readable account names using LDAP lookups against the domain. Identify well-known SIDs for built-in groups. + +5. 
**Check for dangerous permissions**: Compare each ACE's access mask against dangerous permission bitmasks: GenericAll (0x10000000), WriteDACL (0x00040000), WriteOwner (0x00080000), GenericWrite (0x40000000), and WriteProperty for specific extended rights. + +6. **Filter non-admin trustees**: Exclude expected administrative trustees (Domain Admins, Enterprise Admins, SYSTEM, Administrators) and flag ACEs where non-privileged users or groups hold dangerous permissions. + +7. **Map attack paths**: For each finding, document the potential attack chain (e.g., GenericAll on user allows password reset, WriteDACL on group allows adding self to group). + +8. **Generate remediation report**: Output a JSON report with all dangerous ACEs, affected objects, non-admin trustees, and recommended remediation steps. + +## Expected Output + +```json +{ + "domain": "corp.example.com", + "objects_scanned": 1247, + "dangerous_aces_found": 8, + "findings": [ + { + "severity": "critical", + "target_object": "CN=Domain Admins,CN=Users,DC=corp,DC=example,DC=com", + "target_type": "group", + "trustee": "CORP\\helpdesk-team", + "permission": "GenericAll", + "access_mask": "0x10000000", + "ace_type": "ACCESS_ALLOWED", + "attack_path": "GenericAll on Domain Admins group allows adding arbitrary members", + "remediation": "Remove GenericAll ACE for helpdesk-team on Domain Admins" + } + ] +} +``` diff --git a/personas/_shared/skills/analyzing-active-directory-acl-abuse/references/api-reference.md b/personas/_shared/skills/analyzing-active-directory-acl-abuse/references/api-reference.md new file mode 100644 index 0000000..b2c5500 --- /dev/null +++ b/personas/_shared/skills/analyzing-active-directory-acl-abuse/references/api-reference.md @@ -0,0 +1,94 @@ +# Active Directory ACL Abuse API Reference + +## ldap3 Python Connection + +```python +from ldap3 import Server, Connection, ALL, NTLM, SUBTREE + +server = Server("192.168.1.10", get_info=ALL, use_ssl=False) +conn = Connection(server, 
user="DOMAIN\\user", password="pass", + authentication=NTLM, auto_bind=True) + +# Search with nTSecurityDescriptor +conn.search( + "DC=corp,DC=example,DC=com", + "(objectClass=group)", + search_scope=SUBTREE, + attributes=["distinguishedName", "sAMAccountName", + "objectClass", "nTSecurityDescriptor"], +) +``` + +## SDDL ACE Format + +``` +ACE String: (ace_type;ace_flags;rights;object_guid;inherit_guid;trustee_sid) +Example: (A;;GA;;;S-1-5-21-xxx-512) +``` + +| Component | Description | +|-----------|-------------| +| `A` | Access Allowed | +| `D` | Access Denied | +| `OA` | Object Access Allowed | +| `GA` | Generic All | +| `GW` | Generic Write | +| `WD` | Write DACL | +| `WO` | Write Owner | + +## Dangerous Permission Bitmasks + +| Permission | Hex Mask | Risk | +|-----------|----------|------| +| GenericAll | `0x10000000` | Full control over object | +| GenericWrite | `0x40000000` | Modify all writable attributes | +| WriteDACL | `0x00040000` | Modify object permissions | +| WriteOwner | `0x00080000` | Take object ownership | +| WriteProperty | `0x00000020` | Write specific properties | +| ExtendedRight | `0x00000100` | Extended rights (password reset, etc.) 
|
+| Self | `0x00000008` | Self-membership modification |
+| Delete | `0x00010000` | Delete the object |
+
+## BloodHound Cypher Queries for ACL Paths
+
+```cypher
+// Find all users with GenericAll on Domain Admins
+MATCH p=(n:User)-[r:GenericAll]->(g:Group {name:"DOMAIN ADMINS@CORP.COM"})
+RETURN p
+
+// Find WriteDACL paths from non-admins to high-value targets
+MATCH (n:User {admincount:false})
+MATCH p=allShortestPaths((n)-[r:WriteDacl|WriteOwner|GenericAll*1..]->(m:Group))
+WHERE m.highvalue = true
+RETURN p
+
+// Find GenericWrite on computers for RBCD attacks
+MATCH p=(n:User)-[r:GenericWrite]->(c:Computer)
+WHERE NOT n.admincount
+RETURN n.name, c.name
+
+// Enumerate all outbound ACL edges for a principal
+MATCH p=(n {name:"HELPDESK@CORP.COM"})-[r:GenericAll|GenericWrite|WriteDacl|WriteOwner|Owns]->(m)
+RETURN type(r), m.name, labels(m)
+
+// Find shortest ACL abuse path to Domain Admin
+MATCH (n:User {name:"JSMITH@CORP.COM"})
+MATCH (da:Group {name:"DOMAIN ADMINS@CORP.COM"})
+MATCH p=shortestPath((n)-[r:MemberOf|GenericAll|GenericWrite|WriteDacl|WriteOwner|Owns|ForceChangePassword*1..]->(da))
+RETURN p
+```
+
+## PowerView Commands for ACL Enumeration
+
+```powershell
+# Get ACL for Domain Admins group
+Get-DomainObjectAcl -Identity "Domain Admins" -ResolveGUIDs
+
+# Find interesting ACEs for non-admin users
+Find-InterestingDomainAcl -ResolveGUIDs | Where-Object {
+    $_.ActiveDirectoryRights -match "GenericAll|WriteDacl|WriteOwner"
+}
+
+# Get ACL for specific OU
+Get-DomainObjectAcl -SearchBase "OU=Servers,DC=corp,DC=com" -ResolveGUIDs
+```
diff --git a/personas/_shared/skills/analyzing-active-directory-acl-abuse/scripts/agent.py b/personas/_shared/skills/analyzing-active-directory-acl-abuse/scripts/agent.py
new file mode 100644
index 0000000..e4c294d
--- /dev/null
+++ b/personas/_shared/skills/analyzing-active-directory-acl-abuse/scripts/agent.py
@@ -0,0 +1,258 @@
+#!/usr/bin/env python3
+"""Active Directory ACL abuse detection using ldap3 to find
dangerous permissions.""" + +import argparse +import json +import struct + +from ldap3 import Server, Connection, ALL, NTLM, SUBTREE + + +DANGEROUS_MASKS = { + "GenericAll": 0x10000000, + "GenericWrite": 0x40000000, + "WriteDACL": 0x00040000, + "WriteOwner": 0x00080000, + "WriteProperty": 0x00000020, + "Self": 0x00000008, + "ExtendedRight": 0x00000100, + "DeleteChild": 0x00000002, + "Delete": 0x00010000, +} + +ADMIN_SIDS = { + "S-1-5-18", + "S-1-5-32-544", + "S-1-5-9", +} + +ADMIN_RID_SUFFIXES = { + "-500", + "-512", + "-516", + "-518", + "-519", + "-498", +} + +ATTACK_PATHS = { + "GenericAll": { + "user": "Full control allows password reset, Kerberoasting via SPN, or shadow credential attack", + "group": "Full control allows adding arbitrary members to the group", + "computer": "Full control allows resource-based constrained delegation attack", + "organizationalUnit": "Full control allows linking malicious GPO or moving objects", + }, + "WriteDACL": { + "user": "Can modify DACL to grant self GenericAll, then reset password", + "group": "Can modify DACL to grant self write membership, then add self", + "computer": "Can modify DACL to grant self full control on machine account", + "organizationalUnit": "Can modify DACL to gain control over OU child objects", + }, + "WriteOwner": { + "user": "Can take ownership then modify DACL to escalate privileges", + "group": "Can take ownership of group then modify membership", + "computer": "Can take ownership then configure delegation abuse", + "organizationalUnit": "Can take ownership then control OU policies", + }, + "GenericWrite": { + "user": "Can write scriptPath for logon script execution or modify SPN for Kerberoasting", + "group": "Can modify group attributes including membership", + "computer": "Can write msDS-AllowedToActOnBehalfOfOtherIdentity for RBCD attack", + "organizationalUnit": "Can modify OU attributes and link GPO", + }, +} + + +def is_admin_sid(sid: str, domain_sid: str) -> bool: + if sid in ADMIN_SIDS: + 
return True + for suffix in ADMIN_RID_SUFFIXES: + if sid == domain_sid + suffix: + return True + return False + + +def parse_sid(raw: bytes) -> str: + if len(raw) < 8: + return "" + revision = raw[0] + sub_auth_count = raw[1] + authority = int.from_bytes(raw[2:8], byteorder="big") + subs = [] + for i in range(sub_auth_count): + offset = 8 + i * 4 + if offset + 4 > len(raw): + break + subs.append(struct.unpack(" list: + aces = [] + if len(descriptor_bytes) < 20: + return aces + revision = descriptor_bytes[0] + control = struct.unpack("= len(descriptor_bytes): + return aces + dacl = descriptor_bytes[dacl_offset:] + if len(dacl) < 8: + return aces + acl_size = struct.unpack(" len(dacl): + break + ace_type = dacl[offset] + ace_flags = dacl[offset + 1] + ace_size = struct.unpack(" len(dacl): + break + if ace_type in (0x00, 0x05): + if offset + 8 <= len(dacl): + access_mask = struct.unpack(" str: + try: + conn.search(base_dn, f"(objectSid={sid})", attributes=["sAMAccountName", "cn"]) + if conn.entries: + entry = conn.entries[0] + return str(entry.sAMAccountName) if hasattr(entry, "sAMAccountName") else str(entry.cn) + except Exception: + pass + return sid + + +def get_domain_sid(conn: Connection, base_dn: str) -> str: + conn.search(base_dn, "(objectClass=domain)", attributes=["objectSid"]) + if conn.entries: + raw = conn.entries[0].objectSid.raw_values[0] + return parse_sid(raw) + return "" + + +def analyze_acls(dc_ip: str, domain: str, username: str, password: str, + target_ou: str) -> dict: + server = Server(dc_ip, get_info=ALL, use_ssl=False) + domain_parts = domain.split(".") + base_dn = ",".join(f"DC={p}" for p in domain_parts) + search_base = target_ou if target_ou else base_dn + ntlm_user = f"{domain}\\{username}" + + conn = Connection(server, user=ntlm_user, password=password, + authentication=NTLM, auto_bind=True) + domain_sid = get_domain_sid(conn, base_dn) + + conn.search( + search_base, + 
"(|(objectClass=user)(objectClass=group)(objectClass=computer)(objectClass=organizationalUnit))", + search_scope=SUBTREE, + attributes=["distinguishedName", "sAMAccountName", "objectClass", "nTSecurityDescriptor"], + ) + + findings = [] + objects_scanned = 0 + sid_cache = {} + + for entry in conn.entries: + objects_scanned += 1 + dn = str(entry.distinguishedName) + obj_classes = [str(c) for c in entry.objectClass.values] if hasattr(entry, "objectClass") else [] + obj_type = "unknown" + for oc in obj_classes: + if oc.lower() in ("user", "group", "computer", "organizationalunit"): + obj_type = oc.lower() + break + + if not hasattr(entry, "nTSecurityDescriptor"): + continue + raw_sd = entry.nTSecurityDescriptor.raw_values + if not raw_sd: + continue + sd_bytes = raw_sd[0] + aces = parse_acl(sd_bytes) + + for ace in aces: + trustee_sid = ace["trustee_sid"] + if is_admin_sid(trustee_sid, domain_sid): + continue + if trustee_sid not in sid_cache: + sid_cache[trustee_sid] = resolve_sid(conn, base_dn, trustee_sid) + trustee_name = sid_cache[trustee_sid] + + for perm in ace["permissions"]: + if perm in ("Delete", "DeleteChild", "Self", "WriteProperty", "ExtendedRight"): + severity = "medium" + else: + severity = "critical" + attack = ATTACK_PATHS.get(perm, {}).get(obj_type, + f"{perm} on {obj_type} may allow privilege escalation") + findings.append({ + "severity": severity, + "target_object": dn, + "target_type": obj_type, + "trustee": trustee_name, + "trustee_sid": trustee_sid, + "permission": perm, + "access_mask": ace["access_mask"], + "ace_type": ace["ace_type"], + "attack_path": attack, + "remediation": f"Remove {perm} ACE for {trustee_name} on {dn}", + }) + + conn.unbind() + findings.sort(key=lambda f: 0 if f["severity"] == "critical" else 1) + return { + "domain": domain, + "domain_sid": domain_sid, + "search_base": search_base, + "objects_scanned": objects_scanned, + "dangerous_aces_found": len(findings), + "findings": findings, + } + + +def main(): + parser = 
argparse.ArgumentParser(description="Active Directory ACL Abuse Analyzer") + parser.add_argument("--dc-ip", required=True, help="Domain Controller IP address") + parser.add_argument("--domain", required=True, help="AD domain name (e.g., corp.example.com)") + parser.add_argument("--username", required=True, help="Domain username for LDAP bind") + parser.add_argument("--password", required=True, help="Domain user password") + parser.add_argument("--target-ou", default=None, + help="Target OU distinguished name to scope the search") + parser.add_argument("--output", default=None, help="Output JSON file path") + args = parser.parse_args() + + result = analyze_acls(args.dc_ip, args.domain, args.username, + args.password, args.target_ou) + report = json.dumps(result, indent=2) + if args.output: + with open(args.output, "w") as f: + f.write(report) + print(report) + + +if __name__ == "__main__": + main() diff --git a/personas/_shared/skills/analyzing-android-malware-with-apktool/LICENSE b/personas/_shared/skills/analyzing-android-malware-with-apktool/LICENSE new file mode 100644 index 0000000..d885118 --- /dev/null +++ b/personas/_shared/skills/analyzing-android-malware-with-apktool/LICENSE @@ -0,0 +1,201 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to the Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by the Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding any notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. Please do not remove or change + the license header comment from a contributed file except when + necessary. 
+ + Copyright 2026 mukul975 + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/personas/_shared/skills/analyzing-android-malware-with-apktool/SKILL.md b/personas/_shared/skills/analyzing-android-malware-with-apktool/SKILL.md new file mode 100644 index 0000000..1bb9a19 --- /dev/null +++ b/personas/_shared/skills/analyzing-android-malware-with-apktool/SKILL.md @@ -0,0 +1,61 @@ +--- +name: analyzing-android-malware-with-apktool +description: Perform static analysis of Android APK malware samples using apktool for decompilation, jadx for Java source + recovery, and androguard for permission analysis, manifest inspection, and suspicious API call detection. +domain: cybersecurity +subdomain: malware-analysis +tags: +- Android +- APK +- apktool +- jadx +- androguard +- mobile-malware +- static-analysis +- reverse-engineering +version: '1.0' +author: mahipal +license: Apache-2.0 +nist_csf: +- DE.AE-02 +- RS.AN-03 +- ID.RA-01 +- DE.CM-01 +--- + +# Analyzing Android Malware with Apktool + +## Overview + +Android malware distributed as APK files can be statically analyzed to extract permissions, activities, services, broadcast receivers, and suspicious API calls without executing the sample. This skill uses androguard for programmatic APK analysis, identifying dangerous permission combinations, obfuscated code patterns, dynamic code loading, reflection-based API calls, and network communication indicators. 
+ + +## When to Use + +- When investigating security incidents that require analyzing android malware with apktool +- When building detection rules or threat hunting queries for this domain +- When SOC analysts need structured procedures for this analysis type +- When validating security monitoring coverage for related attack techniques + +## Prerequisites + +- Python 3.9+ with `androguard` +- apktool (for resource decompilation) +- jadx (for Java source recovery, optional) +- Isolated analysis environment (VM or sandbox) +- Sample APK files for analysis + +## Steps + +1. Parse APK with androguard to extract manifest metadata +2. Enumerate requested permissions and flag dangerous combinations +3. List activities, services, receivers, and providers from manifest +4. Scan for suspicious API calls (reflection, crypto, SMS, telephony) +5. Detect dynamic code loading patterns (DexClassLoader, Runtime.exec) +6. Extract hardcoded URLs, IPs, and C2 indicators from strings +7. Generate risk assessment report with MITRE ATT&CK mobile mappings + +## Expected Output + +- JSON report with permission analysis, component listing, suspicious API calls, network indicators, and risk score +- Extracted strings and potential IOCs from the APK diff --git a/personas/_shared/skills/analyzing-android-malware-with-apktool/references/api-reference.md b/personas/_shared/skills/analyzing-android-malware-with-apktool/references/api-reference.md new file mode 100644 index 0000000..44d8547 --- /dev/null +++ b/personas/_shared/skills/analyzing-android-malware-with-apktool/references/api-reference.md @@ -0,0 +1,69 @@ +# API Reference — Analyzing Android Malware with Apktool + +## Libraries Used +- **androguard**: Python APK/DEX analysis — `AnalyzeAPK()`, permission enumeration, API call scanning +- **re**: Regex extraction of URLs, IPs, base64 patterns from DEX strings +- **json**: JSON serialization for analysis reports + +## CLI Interface +``` +python agent.py sample.apk permissions +python 
agent.py sample.apk manifest +python agent.py sample.apk apis +python agent.py sample.apk strings +python agent.py sample.apk full +python agent.py sample.apk # defaults to full analysis +``` + +## Core Functions + +### `analyze_permissions(apk)` — Permission risk assessment +Calls `apk.get_permissions()`. Flags 20 dangerous permissions including +SEND_SMS, READ_CONTACTS, BIND_DEVICE_ADMIN, BIND_ACCESSIBILITY_SERVICE. +Risk: CRITICAL >= 8 dangerous, HIGH >= 5, MEDIUM >= 2, LOW < 2. + +### `analyze_manifest(apk)` — Manifest component extraction +Calls `apk.get_activities()`, `get_services()`, `get_receivers()`, `get_providers()`. +Returns package name, version, SDK levels, and all component lists. + +### `scan_suspicious_apis(dx)` — Suspicious API call detection +Searches DEX analysis for 14 patterns including: +- `Runtime.exec`, `ProcessBuilder.start` — command execution +- `DexClassLoader.loadClass` — dynamic code loading +- `Method.invoke`, `Class.forName` — reflection +- `Cipher.getInstance` — cryptographic operations +- `SmsManager.sendTextMessage` — SMS abuse + +### `extract_strings(dx, apk)` — IOC extraction from DEX strings +Regex extraction of HTTP/HTTPS URLs, external IP addresses, and base64 strings. +Filters out private IP ranges (10.x, 192.168.x, 172.16.x, 127.x). + +### `detect_obfuscation(apk, dx)` — Obfuscation indicator detection +Checks for single-letter class names (ProGuard), multi-DEX, native libraries. 
+ +### `full_analysis(apk_path)` — Comprehensive malware assessment + +## Androguard API +| Method | Returns | +|--------|---------| +| `AnalyzeAPK(path)` | `(APK, list[DEX], Analysis)` tuple | +| `apk.get_permissions()` | List of Android permissions | +| `apk.get_activities()` | Activity component names | +| `apk.get_services()` | Service component names | +| `apk.get_receivers()` | BroadcastReceiver names | +| `apk.get_package()` | Package name string | +| `dx.find_methods(classname, methodname)` | Matching method analysis objects | +| `dx.get_strings()` | All strings from DEX files | +| `dx.get_classes()` | All class analysis objects | + +## Risk Scoring +| Factor | Max Points | +|--------|-----------| +| Dangerous permissions (8 pts each) | 40 | +| Suspicious API calls (10 pts each) | 30 | +| External IPs (5 pts each) | 15 | +| Obfuscation detected | 15 | + +## Dependencies +- `androguard` >= 3.4.0 +- Isolated analysis environment recommended diff --git a/personas/_shared/skills/analyzing-android-malware-with-apktool/scripts/agent.py b/personas/_shared/skills/analyzing-android-malware-with-apktool/scripts/agent.py new file mode 100644 index 0000000..f2daacc --- /dev/null +++ b/personas/_shared/skills/analyzing-android-malware-with-apktool/scripts/agent.py @@ -0,0 +1,228 @@ +#!/usr/bin/env python3 +"""Agent for static analysis of Android APK malware using androguard.""" + +import json +import re +import argparse +from datetime import datetime + +try: + from androguard.core.apk import APK + from androguard.core.dex import DEX + from androguard.misc import AnalyzeAPK +except ImportError: + APK = None + AnalyzeAPK = None + +DANGEROUS_PERMISSIONS = [ + "android.permission.SEND_SMS", "android.permission.READ_SMS", + "android.permission.RECEIVE_SMS", "android.permission.READ_CONTACTS", + "android.permission.READ_CALL_LOG", "android.permission.RECORD_AUDIO", + "android.permission.CAMERA", "android.permission.ACCESS_FINE_LOCATION", + 
"android.permission.READ_PHONE_STATE", "android.permission.CALL_PHONE", + "android.permission.WRITE_EXTERNAL_STORAGE", "android.permission.READ_EXTERNAL_STORAGE", + "android.permission.INSTALL_PACKAGES", "android.permission.REQUEST_INSTALL_PACKAGES", + "android.permission.SYSTEM_ALERT_WINDOW", "android.permission.BIND_ACCESSIBILITY_SERVICE", + "android.permission.BIND_DEVICE_ADMIN", "android.permission.RECEIVE_BOOT_COMPLETED", + "android.permission.WRITE_SETTINGS", "android.permission.CHANGE_WIFI_STATE", +] + +SUSPICIOUS_API_PATTERNS = [ + r"Ljava/lang/Runtime;->exec", + r"Ljava/lang/ProcessBuilder;->start", + r"Ldalvik/system/DexClassLoader;->loadClass", + r"Ljava/lang/reflect/Method;->invoke", + r"Ljava/lang/Class;->forName", + r"Ljavax/crypto/Cipher;->getInstance", + r"Landroid/telephony/SmsManager;->sendTextMessage", + r"Landroid/app/admin/DevicePolicyManager;->lockNow", + r"Landroid/content/pm/PackageManager;->setComponentEnabledSetting", + r"Ljava/net/HttpURLConnection;->connect", + r"Lokhttp3/OkHttpClient;->newCall", + r"Landroid/webkit/WebView;->loadUrl", + r"Landroid/os/Build;->SERIAL", + r"Landroid/provider/Settings\$Secure;->getString", +] + + +def analyze_permissions(apk): + """Analyze requested permissions and flag dangerous ones.""" + permissions = apk.get_permissions() + dangerous = [p for p in permissions if p in DANGEROUS_PERMISSIONS] + return { + "total_permissions": len(permissions), + "permissions": permissions, + "dangerous_permissions": dangerous, + "dangerous_count": len(dangerous), + "permission_risk": "CRITICAL" if len(dangerous) >= 8 else "HIGH" if len(dangerous) >= 5 else "MEDIUM" if len(dangerous) >= 2 else "LOW", + } + + +def analyze_manifest(apk): + """Extract manifest components: activities, services, receivers, providers.""" + activities = apk.get_activities() + services = apk.get_services() + receivers = apk.get_receivers() + providers = apk.get_providers() + return { + "package_name": apk.get_package(), + "app_name": 
apk.get_app_name(), + "version_name": apk.get_androidversion_name(), + "version_code": apk.get_androidversion_code(), + "min_sdk": apk.get_min_sdk_version(), + "target_sdk": apk.get_target_sdk_version(), + "activities": list(activities), + "services": list(services), + "receivers": list(receivers), + "providers": list(providers), + "activity_count": len(activities), + "service_count": len(services), + "receiver_count": len(receivers), + "provider_count": len(providers), + } + + +def scan_suspicious_apis(dx): + """Scan DEX analysis for suspicious API calls.""" + findings = [] + if not dx: + return findings + for pattern in SUSPICIOUS_API_PATTERNS: + class_name = pattern.split(";->")[0] + ";" + method_name = pattern.split(";->")[1] if ";->" in pattern else None + for method in dx.find_methods(classname=class_name, methodname=method_name): + xrefs = list(method.get_xref_from()) + if xrefs: + findings.append({ + "api": pattern, + "callers": len(xrefs), + "first_caller_class": str(xrefs[0][0].name) if xrefs else None, + }) + return findings + + +def extract_strings(dx, apk): + """Extract suspicious strings: URLs, IPs, base64 patterns.""" + url_pattern = re.compile(r'https?://[\w\-._~:/?#\[\]@!$&\'()*+,;=]+', re.IGNORECASE) + ip_pattern = re.compile(r'\b(?:\d{1,3}\.){3}\d{1,3}\b') + b64_pattern = re.compile(r'[A-Za-z0-9+/]{30,}={0,2}') + + urls = set() + ips = set() + b64_strings = [] + + if dx: + for s in dx.get_strings(): + val = str(s) + urls.update(url_pattern.findall(val)) + ips.update(ip_pattern.findall(val)) + b64_matches = b64_pattern.findall(val) + b64_strings.extend(b64_matches[:5]) + + private_ips = {"10.", "192.168.", "172.16.", "127.0."} + external_ips = [ip for ip in ips if not any(ip.startswith(p) for p in private_ips)] + + return { + "urls": sorted(urls)[:30], + "external_ips": sorted(external_ips)[:20], + "suspicious_base64": b64_strings[:10], + "url_count": len(urls), + "external_ip_count": len(external_ips), + } + + +def detect_obfuscation(apk, dx): + 
"""Detect code obfuscation indicators.""" + indicators = [] + if dx: + short_class_names = 0 + for cls in dx.get_classes(): + name = str(cls.name) + parts = name.replace("/", ".").split(".") + if any(len(p) == 1 and p.isalpha() for p in parts): + short_class_names += 1 + if short_class_names > 10: + indicators.append({"type": "single_letter_classes", "count": short_class_names}) + + dex_files = [f for f in apk.get_files() if f.endswith(".dex")] + if len(dex_files) > 1: + indicators.append({"type": "multi_dex", "dex_count": len(dex_files)}) + + native_libs = [f for f in apk.get_files() if f.endswith(".so")] + if native_libs: + indicators.append({"type": "native_libraries", "libs": native_libs[:10]}) + + return { + "obfuscation_indicators": indicators, + "likely_obfuscated": len(indicators) > 0, + } + + +def full_analysis(apk_path): + """Run comprehensive APK malware analysis.""" + if not APK or not AnalyzeAPK: + return {"error": "androguard not installed: pip install androguard"} + + a, d, dx = AnalyzeAPK(apk_path) + + perm_analysis = analyze_permissions(a) + manifest = analyze_manifest(a) + suspicious_apis = scan_suspicious_apis(dx) + strings = extract_strings(dx, a) + obfuscation = detect_obfuscation(a, dx) + + risk_score = 0 + risk_score += min(perm_analysis["dangerous_count"] * 8, 40) + risk_score += min(len(suspicious_apis) * 10, 30) + risk_score += min(strings["external_ip_count"] * 5, 15) + risk_score += 15 if obfuscation["likely_obfuscated"] else 0 + risk_score = min(risk_score, 100) + + return { + "analysis_type": "Android APK Static Analysis", + "timestamp": datetime.utcnow().isoformat(), + "file": apk_path, + "manifest": manifest, + "permissions": perm_analysis, + "suspicious_apis": suspicious_apis[:20], + "strings": strings, + "obfuscation": obfuscation, + "risk_score": risk_score, + "risk_level": "CRITICAL" if risk_score >= 70 else "HIGH" if risk_score >= 50 else "MEDIUM" if risk_score >= 25 else "LOW", + "mitre_techniques": [ + {"id": "T1418", "name": 
"Software Discovery"} if manifest["service_count"] > 5 else None, + {"id": "T1417", "name": "Input Capture"} if "android.permission.BIND_ACCESSIBILITY_SERVICE" in perm_analysis["permissions"] else None, + {"id": "T1582", "name": "SMS Control"} if "android.permission.SEND_SMS" in perm_analysis["permissions"] else None, + {"id": "T1404", "name": "Exploitation for Privilege Escalation"} if any("DevicePolicyManager" in a.get("api", "") for a in suspicious_apis) else None, + ], + } + + +def main(): + parser = argparse.ArgumentParser(description="Android APK Malware Analysis Agent") + parser.add_argument("apk", help="Path to APK file") + sub = parser.add_subparsers(dest="command") + sub.add_parser("permissions", help="Analyze permissions") + sub.add_parser("manifest", help="Extract manifest components") + sub.add_parser("apis", help="Scan for suspicious API calls") + sub.add_parser("strings", help="Extract URLs, IPs, and encoded strings") + sub.add_parser("full", help="Full malware analysis") + args = parser.parse_args() + + if args.command == "full" or args.command is None: + result = full_analysis(args.apk) + else: + a, d, dx = AnalyzeAPK(args.apk) + if args.command == "permissions": + result = analyze_permissions(a) + elif args.command == "manifest": + result = analyze_manifest(a) + elif args.command == "apis": + result = scan_suspicious_apis(dx) + elif args.command == "strings": + result = extract_strings(dx, a) + print(json.dumps(result, indent=2, default=str)) + + +if __name__ == "__main__": + main() diff --git a/personas/_shared/skills/analyzing-api-gateway-access-logs/LICENSE b/personas/_shared/skills/analyzing-api-gateway-access-logs/LICENSE new file mode 100644 index 0000000..d885118 --- /dev/null +++ b/personas/_shared/skills/analyzing-api-gateway-access-logs/LICENSE @@ -0,0 +1,201 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to the Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by the Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding any notices that do not + pertain to any part of the Derivative Works, in at least one + of the 
following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. Please do not remove or change + the license header comment from a contributed file except when + necessary. + + Copyright 2026 mukul975 + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/personas/_shared/skills/analyzing-api-gateway-access-logs/SKILL.md b/personas/_shared/skills/analyzing-api-gateway-access-logs/SKILL.md new file mode 100644 index 0000000..24c08d0 --- /dev/null +++ b/personas/_shared/skills/analyzing-api-gateway-access-logs/SKILL.md @@ -0,0 +1,71 @@ +--- +name: analyzing-api-gateway-access-logs +description: 'Parses API Gateway access logs (AWS API Gateway, Kong, Nginx) to detect BOLA/IDOR attacks, rate limit bypass, + credential scanning, and injection attempts. Uses pandas for statistical analysis of request patterns and anomaly detection. + Use when investigating API abuse or building API-specific threat detection rules. 
+ + ' +domain: cybersecurity +subdomain: security-operations +tags: +- analyzing +- api +- gateway +- access +version: '1.0' +author: mahipal +license: Apache-2.0 +nist_csf: +- DE.CM-01 +- RS.MA-01 +- GV.OV-01 +- DE.AE-02 +--- + +# Analyzing API Gateway Access Logs + + +## When to Use + +- When investigating security incidents that require analyzing api gateway access logs +- When building detection rules or threat hunting queries for this domain +- When SOC analysts need structured procedures for this analysis type +- When validating security monitoring coverage for related attack techniques + +## Prerequisites + +- Familiarity with security operations concepts and tools +- Access to a test or lab environment for safe execution +- Python 3.8+ with required dependencies installed +- Appropriate authorization for any testing activities + +## Instructions + +Parse API gateway access logs to identify attack patterns including broken object +level authorization (BOLA), excessive data exposure, and injection attempts. + +```python +import pandas as pd + +df = pd.read_json("api_gateway_logs.json", lines=True) +# Detect BOLA: same user accessing many different resource IDs +bola = df.groupby(["user_id", "endpoint"]).agg( + unique_ids=("resource_id", "nunique")).reset_index() +suspicious = bola[bola["unique_ids"] > 50] +``` + +Key detection patterns: +1. BOLA/IDOR: sequential resource ID enumeration +2. Rate limit bypass via header manipulation +3. Credential scanning (401 surges from single source) +4. SQL/NoSQL injection in query parameters +5. 
Unusual HTTP methods (DELETE, PATCH) on read-only endpoints + +## Examples + +```python +# Detect 401 surges indicating credential scanning +auth_failures = df[df["status_code"] == 401] +scanner_ips = auth_failures.groupby("source_ip").size() +scanners = scanner_ips[scanner_ips > 100] +``` diff --git a/personas/_shared/skills/analyzing-api-gateway-access-logs/references/api-reference.md b/personas/_shared/skills/analyzing-api-gateway-access-logs/references/api-reference.md new file mode 100644 index 0000000..54b6239 --- /dev/null +++ b/personas/_shared/skills/analyzing-api-gateway-access-logs/references/api-reference.md @@ -0,0 +1,58 @@ +# API Reference: Analyzing API Gateway Access Logs + +## AWS API Gateway Log Fields + +```json +{ + "requestId": "abc-123", + "ip": "203.0.113.50", + "httpMethod": "GET", + "resourcePath": "/api/users/{id}", + "status": 200, + "requestTime": "2025-03-15T14:00:00Z", + "responseLength": 1024 +} +``` + +## Pandas Log Analysis + +```python +import pandas as pd + +df = pd.read_json("access_logs.json", lines=True) + +# BOLA detection +df.groupby("user_id")["resource_id"].nunique() + +# Auth failure surge +df[df["status_code"] == 401].groupby("source_ip").size() + +# Request velocity +df.set_index("timestamp").resample("1min").size() +``` + +## OWASP API Top 10 Patterns + +| Risk | Detection Pattern | +|------|-------------------| +| BOLA (API1) | User accessing > 50 unique resource IDs | +| Broken Auth (API2) | > 100 401/403 from single IP | +| Excessive Data (API3) | Response size > 10x average | +| Rate Limit (API4) | > 100 req/min from single IP | +| BFLA (API5) | DELETE/PUT on read-only endpoints | +| Injection (API8) | SQL/NoSQL patterns in params | + +## Injection Regex Patterns + +```python +sql = r"union\s+select|drop\s+table|'\s*or\s+'1'" +nosql = r"\$ne|\$gt|\$regex|\$where" +xss = r"= threshold] + for _, row in bola_suspects.iterrows(): + findings.append({ + "user": row[user_col], + "unique_resources_accessed": 
int(row["unique_resources"]), + "total_requests": int(row["total_requests"]), + "type": "BOLA/IDOR", + "severity": "CRITICAL", + }) + return findings + + +def detect_auth_scanning(df, threshold=100): + """Detect credential scanning via 401/403 response surges.""" + findings = [] + auth_failures = df[df["status_code"].isin([401, 403])] + if auth_failures.empty: + return findings + ip_col = "source_ip" if "source_ip" in df.columns else "client_ip" + ip_failures = auth_failures.groupby(ip_col).agg( + failure_count=("status_code", "count"), + unique_endpoints=("request_path", "nunique") if "request_path" in df.columns + else ("path", "nunique"), + ).reset_index() + scanners = ip_failures[ip_failures["failure_count"] >= threshold] + for _, row in scanners.iterrows(): + findings.append({ + "source_ip": row[ip_col], + "auth_failures": int(row["failure_count"]), + "endpoints_probed": int(row["unique_endpoints"]), + "type": "credential_scanning", + "severity": "HIGH", + }) + return findings + + +def detect_injection_attempts(df): + """Detect SQL/NoSQL injection attempts in request parameters.""" + injection_patterns = [ + r"(?:union\s+select|select\s+.*\s+from|drop\s+table|insert\s+into)", + r"(?:'\s*or\s+'1'\s*=\s*'1|'\s*or\s+1\s*=\s*1)", + r'(?:\$ne|\$gt|\$lt|\$regex|\$where)', + r'(?: threshold] + if len(bursts) > 0: + findings.append({ + "source_ip": ip, + "max_requests_per_min": int(resampled.max()), + "burst_periods": len(bursts), + "type": "rate_limit_bypass", + "severity": "MEDIUM", + }) + return sorted(findings, key=lambda x: x["max_requests_per_min"], reverse=True)[:50] + + +def detect_unusual_methods(df): + """Detect unusual HTTP methods on typically read-only endpoints.""" + findings = [] + dangerous_methods = {"DELETE", "PUT", "PATCH"} + method_col = "method" if "method" in df.columns else "http_method" + path_col = "request_path" if "request_path" in df.columns else "path" + unusual = df[df[method_col].str.upper().isin(dangerous_methods)] + for _, row in 
unusual.iterrows(): + findings.append({ + "source_ip": row.get("source_ip", row.get("client_ip", "")), + "method": row[method_col], + "path": row[path_col], + "status_code": int(row.get("status_code", 0)), + "type": "unusual_method", + "severity": "MEDIUM", + }) + return findings[:200] + + +def main(): + parser = argparse.ArgumentParser(description="API Gateway Log Analysis Agent") + parser.add_argument("--log-file", required=True, help="API gateway log file") + parser.add_argument("--output", default="api_gateway_report.json") + parser.add_argument("--action", choices=[ + "bola", "auth_scan", "injection", "rate_limit", "full_analysis" + ], default="full_analysis") + args = parser.parse_args() + + df = load_api_logs(args.log_file) + report = {"generated_at": datetime.utcnow().isoformat(), "total_requests": len(df), + "findings": {}} + print(f"[+] Loaded {len(df)} API requests") + + if args.action in ("bola", "full_analysis"): + findings = detect_bola_attacks(df) + report["findings"]["bola"] = findings + print(f"[+] BOLA suspects: {len(findings)}") + + if args.action in ("auth_scan", "full_analysis"): + findings = detect_auth_scanning(df) + report["findings"]["auth_scanning"] = findings + print(f"[+] Auth scanners: {len(findings)}") + + if args.action in ("injection", "full_analysis"): + findings = detect_injection_attempts(df) + report["findings"]["injection_attempts"] = findings + print(f"[+] Injection attempts: {len(findings)}") + + if args.action in ("rate_limit", "full_analysis"): + findings = detect_rate_limit_bypass(df) + report["findings"]["rate_limit_bypass"] = findings + print(f"[+] Rate limit bypasses: {len(findings)}") + + with open(args.output, "w") as f: + json.dump(report, f, indent=2, default=str) + print(f"[+] Report saved to {args.output}") + + +if __name__ == "__main__": + main() diff --git a/personas/_shared/skills/analyzing-apt-group-with-mitre-navigator/LICENSE b/personas/_shared/skills/analyzing-apt-group-with-mitre-navigator/LICENSE new file 
mode 100644 index 0000000..d885118 --- /dev/null +++ b/personas/_shared/skills/analyzing-apt-group-with-mitre-navigator/LICENSE @@ -0,0 +1,201 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to the Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by the Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. 
Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding any notices that do not + pertain to any part of the Derivative Works, 
in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. Please do not remove or change + the license header comment from a contributed file except when + necessary. + + Copyright 2026 mukul975 + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/personas/_shared/skills/analyzing-apt-group-with-mitre-navigator/SKILL.md b/personas/_shared/skills/analyzing-apt-group-with-mitre-navigator/SKILL.md new file mode 100644 index 0000000..9e5bfab --- /dev/null +++ b/personas/_shared/skills/analyzing-apt-group-with-mitre-navigator/SKILL.md @@ -0,0 +1,285 @@ +--- +name: analyzing-apt-group-with-mitre-navigator +description: Analyze advanced persistent threat (APT) group techniques using MITRE ATT&CK Navigator to create layered heatmaps + of adversary TTPs for detection gap analysis and threat-informed defense. 
+domain: cybersecurity +subdomain: threat-intelligence +tags: +- mitre-attack +- navigator +- apt +- threat-actor +- ttp-analysis +- heatmap +- detection-gap +- threat-intelligence +version: '1.0' +author: mahipal +license: Apache-2.0 +d3fend_techniques: +- Executable Denylisting +- Execution Isolation +- File Metadata Consistency Validation +- Content Format Conversion +- File Content Analysis +nist_csf: +- ID.RA-01 +- ID.RA-05 +- DE.CM-01 +- DE.AE-02 +--- +# Analyzing APT Group with MITRE ATT&CK Navigator + +## Overview + +MITRE ATT&CK Navigator is a web-based tool for annotating and exploring ATT&CK matrices, enabling analysts to visualize threat actor technique coverage, compare multiple APT groups, identify detection gaps, and build threat-informed defense strategies. This skill covers querying ATT&CK data programmatically, mapping APT group TTPs to Navigator layers, creating multi-layer overlays for gap analysis, and generating actionable intelligence reports for detection engineering teams. + + +## When to Use + +- When investigating security incidents that require analyzing apt group with mitre navigator +- When building detection rules or threat hunting queries for this domain +- When SOC analysts need structured procedures for this analysis type +- When validating security monitoring coverage for related attack techniques + +## Prerequisites + +- Python 3.9+ with `attackcti`, `mitreattack-python`, `stix2`, `requests` libraries +- ATT&CK Navigator (https://mitre-attack.github.io/attack-navigator/) or local deployment +- Understanding of ATT&CK Enterprise matrix: 14 Tactics, 200+ Techniques, Sub-techniques +- Access to threat intelligence reports or MISP/OpenCTI for threat actor data +- Familiarity with STIX 2.1 Intrusion Set and Attack Pattern objects + +## Key Concepts + +### ATT&CK Navigator Layers + +Navigator layers are JSON files that annotate ATT&CK techniques with scores, colors, comments, and metadata. 
Each layer can represent a single APT group's technique usage, a detection capability map, or a combined overlay. Layer version 4.5 supports enterprise-attack, mobile-attack, and ics-attack domains with filtering by platform (Windows, Linux, macOS, Cloud, Azure AD, Office 365, SaaS). + +### APT Group Profiles in ATT&CK + +ATT&CK catalogs over 140 threat groups with documented technique usage. Each group profile includes aliases, targeted sectors, associated campaigns, software used, and technique mappings with procedure-level detail. Groups are identified by G-codes (e.g., G0016 for APT29, G0007 for APT28, G0032 for Lazarus Group). + +### Multi-Layer Analysis + +The Navigator supports loading multiple layers simultaneously, allowing analysts to overlay threat actor TTPs against detection coverage to identify gaps, compare multiple APT groups to find common techniques worth prioritizing, and track technique coverage changes over time. + +## Workflow + +### Step 1: Query ATT&CK Data for APT Group + +```python +from attackcti import attack_client +import json + +lift = attack_client() + +# Get all threat groups +groups = lift.get_groups() +print(f"Total ATT&CK groups: {len(groups)}") + +# Find APT29 (Cozy Bear / Midnight Blizzard) +apt29 = next((g for g in groups if g.get('name') == 'APT29'), None) +if apt29: + print(f"Group: {apt29['name']}") + print(f"Aliases: {apt29.get('aliases', [])}") + print(f"Description: {apt29.get('description', '')[:300]}") + +# Get techniques used by APT29 (G0016) +techniques = lift.get_techniques_used_by_group("G0016") +print(f"APT29 uses {len(techniques)} techniques") + +technique_map = {} +for tech in techniques: + tech_id = "" + for ref in tech.get("external_references", []): + if ref.get("source_name") == "mitre-attack": + tech_id = ref.get("external_id", "") + break + if tech_id: + tactics = [p.get("phase_name", "") for p in tech.get("kill_chain_phases", [])] + technique_map[tech_id] = { + "name": tech.get("name", ""), + "tactics": 
tactics, + "description": tech.get("description", "")[:500], + "platforms": tech.get("x_mitre_platforms", []), + "data_sources": tech.get("x_mitre_data_sources", []), + } +``` + +### Step 2: Generate Navigator Layer JSON + +```python +def create_navigator_layer(group_name, technique_map, color="#ff6666"): + techniques_list = [] + for tech_id, info in technique_map.items(): + for tactic in info["tactics"]: + techniques_list.append({ + "techniqueID": tech_id, + "tactic": tactic, + "color": color, + "comment": info["name"], + "enabled": True, + "score": 100, + "metadata": [ + {"name": "group", "value": group_name}, + {"name": "platforms", "value": ", ".join(info["platforms"])}, + ], + }) + + layer = { + "name": f"{group_name} TTP Coverage", + "versions": {"attack": "16.1", "navigator": "5.1.0", "layer": "4.5"}, + "domain": "enterprise-attack", + "description": f"Techniques attributed to {group_name}", + "filters": { + "platforms": ["Linux", "macOS", "Windows", "Cloud", + "Azure AD", "Office 365", "SaaS", "Google Workspace"] + }, + "sorting": 0, + "layout": { + "layout": "side", "aggregateFunction": "average", + "showID": True, "showName": True, + "showAggregateScores": False, "countUnscored": False, + }, + "hideDisabled": False, + "techniques": techniques_list, + "gradient": {"colors": ["#ffffff", color], "minValue": 0, "maxValue": 100}, + "legendItems": [ + {"label": f"Used by {group_name}", "color": color}, + {"label": "Not observed", "color": "#ffffff"}, + ], + "showTacticRowBackground": True, + "tacticRowBackground": "#dddddd", + "selectTechniquesAcrossTactics": True, + "selectSubtechniquesWithParent": False, + "selectVisibleTechniques": False, + } + return layer + +layer = create_navigator_layer("APT29", technique_map) +with open("apt29_layer.json", "w") as f: + json.dump(layer, f, indent=2) +print("[+] Layer saved: apt29_layer.json") +``` + +### Step 3: Compare Multiple APT Groups + +```python +groups_to_compare = {"G0016": "APT29", "G0007": "APT28", "G0032": 
"Lazarus Group"} +group_techniques = {} + +for gid, gname in groups_to_compare.items(): + techs = lift.get_techniques_used_by_group(gid) + tech_ids = set() + for t in techs: + for ref in t.get("external_references", []): + if ref.get("source_name") == "mitre-attack": + tech_ids.add(ref.get("external_id", "")) + group_techniques[gname] = tech_ids + +common_to_all = set.intersection(*group_techniques.values()) +print(f"Techniques common to all groups: {len(common_to_all)}") +for tid in sorted(common_to_all): + print(f" {tid}") + +for gname, techs in group_techniques.items(): + others = set.union(*[t for n, t in group_techniques.items() if n != gname]) + unique = techs - others + print(f"\nUnique to {gname}: {len(unique)} techniques") +``` + +### Step 4: Detection Gap Analysis with Layer Overlay + +```python +# Define your current detection capabilities +detected_techniques = { + "T1059", "T1059.001", "T1071", "T1071.001", "T1566", "T1566.001", + "T1547", "T1547.001", "T1053", "T1053.005", "T1078", "T1027", +} + +actor_techniques = set(technique_map.keys()) +covered = actor_techniques.intersection(detected_techniques) +gaps = actor_techniques - detected_techniques + +print(f"=== Detection Gap Analysis for APT29 ===") +print(f"Actor techniques: {len(actor_techniques)}") +print(f"Detected: {len(covered)} ({len(covered)/len(actor_techniques)*100:.0f}%)") +print(f"Gaps: {len(gaps)} ({len(gaps)/len(actor_techniques)*100:.0f}%)") + +# Create gap layer (red = undetected, green = detected) +gap_techniques = [] +for tech_id in actor_techniques: + info = technique_map.get(tech_id, {}) + for tactic in info.get("tactics", [""]): + color = "#66ff66" if tech_id in detected_techniques else "#ff3333" + gap_techniques.append({ + "techniqueID": tech_id, + "tactic": tactic, + "color": color, + "comment": f"{'DETECTED' if tech_id in detected_techniques else 'GAP'}: {info.get('name', '')}", + "enabled": True, + "score": 100 if tech_id in detected_techniques else 0, + }) + +gap_layer = { + 
"name": "APT29 Detection Gap Analysis", + "versions": {"attack": "16.1", "navigator": "5.1.0", "layer": "4.5"}, + "domain": "enterprise-attack", + "description": "Green = detected, Red = gap", + "techniques": gap_techniques, + "gradient": {"colors": ["#ff3333", "#66ff66"], "minValue": 0, "maxValue": 100}, + "legendItems": [ + {"label": "Detected", "color": "#66ff66"}, + {"label": "Detection Gap", "color": "#ff3333"}, + ], +} +with open("apt29_gap_layer.json", "w") as f: + json.dump(gap_layer, f, indent=2) +``` + +### Step 5: Tactic Breakdown Analysis + +```python +from collections import defaultdict + +tactic_breakdown = defaultdict(list) +for tech_id, info in technique_map.items(): + for tactic in info["tactics"]: + tactic_breakdown[tactic].append({"id": tech_id, "name": info["name"]}) + +tactic_order = [ + "reconnaissance", "resource-development", "initial-access", + "execution", "persistence", "privilege-escalation", + "defense-evasion", "credential-access", "discovery", + "lateral-movement", "collection", "command-and-control", + "exfiltration", "impact", +] + +print("\n=== APT29 Tactic Breakdown ===") +for tactic in tactic_order: + techs = tactic_breakdown.get(tactic, []) + if techs: + print(f"\n{tactic.upper()} ({len(techs)} techniques):") + for t in techs: + print(f" {t['id']}: {t['name']}") +``` + +## Validation Criteria + +- ATT&CK data queried successfully via TAXII server +- APT group mapped to all documented techniques with procedure examples +- Navigator layer JSON validates and renders correctly in ATT&CK Navigator +- Multi-layer overlay shows threat actor vs. 
detection coverage +- Detection gap analysis identifies unmonitored techniques with data source recommendations +- Cross-group comparison reveals shared and unique TTPs +- Output is actionable for detection engineering prioritization + +## References + +- [MITRE ATT&CK Navigator](https://mitre-attack.github.io/attack-navigator/) +- [ATT&CK Groups](https://attack.mitre.org/groups/) +- [attackcti Python Library](https://github.com/OTRF/ATTACK-Python-Client) +- [Navigator Layer Format v4.5](https://github.com/mitre-attack/attack-navigator/blob/master/layers/LAYERFORMATv4_5.md) +- [CISA Best Practices for MITRE ATT&CK Mapping](https://www.cisa.gov/sites/default/files/2023-01/Best%20Practices%20for%20MITRE%20ATTCK%20Mapping.pdf) +- [Picus: Leverage MITRE ATT&CK for Threat Intelligence](https://www.picussecurity.com/how-to-leverage-the-mitre-attack-framework-for-threat-intelligence) diff --git a/personas/_shared/skills/analyzing-apt-group-with-mitre-navigator/references/api-reference.md b/personas/_shared/skills/analyzing-apt-group-with-mitre-navigator/references/api-reference.md new file mode 100644 index 0000000..156b957 --- /dev/null +++ b/personas/_shared/skills/analyzing-apt-group-with-mitre-navigator/references/api-reference.md @@ -0,0 +1,97 @@ +# API Reference: MITRE ATT&CK Navigator APT Analysis + +## ATT&CK Navigator Layer Format + +### Layer JSON Structure +```json +{ + "name": "APT29 - TTPs", + "versions": {"attack": "14", "navigator": "4.9.1", "layer": "4.5"}, + "domain": "enterprise-attack", + "techniques": [ + { + "techniqueID": "T1566.001", + "tactic": "initial-access", + "color": "#ff6666", + "score": 100, + "comment": "Used by APT29", + "enabled": true + } + ], + "gradient": {"colors": ["#ffffff", "#ff6666"], "minValue": 0, "maxValue": 100} +} +``` + +## ATT&CK STIX Data Access + +### Download Enterprise ATT&CK Bundle +```bash +curl -o enterprise-attack.json \ + https://raw.githubusercontent.com/mitre/cti/master/enterprise-attack/enterprise-attack.json 
+``` + +### STIX Object Types +| Type | Description | +|------|-------------| +| `intrusion-set` | APT groups / threat actors | +| `attack-pattern` | Techniques and sub-techniques | +| `relationship` | Links groups to techniques (`uses`) | +| `malware` | Malware families | +| `tool` | Legitimate tools used by adversaries | + +## mitreattack-python Library + +### Installation +```bash +pip install mitreattack-python +``` + +### Query Group Techniques +```python +from mitreattack.stix20 import MitreAttackData + +attack = MitreAttackData("enterprise-attack.json") +groups = attack.get_groups() +for g in groups: + techs = attack.get_techniques_used_by_group(g) + print(f"{g.name}: {len(techs)} techniques") +``` + +### Get Technique Details +```python +technique = attack.get_object_by_attack_id("T1566.001", "attack-pattern") +print(technique.name) # Spearphishing Attachment +print(technique.x_mitre_platforms) # ['Windows', 'macOS', 'Linux'] +``` + +## Navigator CLI (attack-navigator) + +### Export Layer to SVG +```bash +npx attack-navigator-export \ + --layer layer.json \ + --output output.svg \ + --theme dark +``` + +## ATT&CK API (TAXII) +```python +from stix2 import TAXIICollectionSource, Filter +from taxii2client.v20 import Collection + +collection = Collection( + "https://cti-taxii.mitre.org/stix/collections/95ecc380-afe9-11e4-9b6c-751b66dd541e/" +) +tc_source = TAXIICollectionSource(collection) +groups = tc_source.query([Filter("type", "=", "intrusion-set")]) +``` + +## Key APT Groups Reference +| ID | Name | Known Aliases | +|----|------|--------------| +| G0016 | APT29 | Cozy Bear, The Dukes, NOBELIUM | +| G0007 | APT28 | Fancy Bear, Sofacy, Strontium | +| G0022 | APT3 | Gothic Panda, UPS | +| G0032 | Lazarus Group | HIDDEN COBRA, Zinc | +| G0074 | Dragonfly 2.0 | Energetic Bear, Berserk Bear | +| G0010 | Turla | Waterbug, Venomous Bear | diff --git a/personas/_shared/skills/analyzing-apt-group-with-mitre-navigator/scripts/agent.py 
b/personas/_shared/skills/analyzing-apt-group-with-mitre-navigator/scripts/agent.py new file mode 100644 index 0000000..ec0bf88 --- /dev/null +++ b/personas/_shared/skills/analyzing-apt-group-with-mitre-navigator/scripts/agent.py @@ -0,0 +1,244 @@ +#!/usr/bin/env python3 +"""APT group analysis agent using MITRE ATT&CK Navigator layers. + +Queries ATT&CK data, maps APT techniques to Navigator layers, +performs detection gap analysis, and generates threat-informed reports. +""" + +import json +import os +import sys +from collections import Counter + +try: + import requests + HAS_REQUESTS = True +except ImportError: + HAS_REQUESTS = False + +ATTACK_ENTERPRISE_URL = "https://raw.githubusercontent.com/mitre/cti/master/enterprise-attack/enterprise-attack.json" + +NAVIGATOR_LAYER_TEMPLATE = { + "name": "", + "versions": {"attack": "14", "navigator": "4.9.1", "layer": "4.5"}, + "domain": "enterprise-attack", + "description": "", + "filters": {"platforms": ["Windows", "Linux", "macOS", "Cloud"]}, + "sorting": 0, + "layout": {"layout": "side", "aggregateFunction": "average", "showID": False, + "showName": True, "showAggregateScores": False, "countUnscored": False}, + "hideDisabled": False, + "techniques": [], + "gradient": {"colors": ["#ffffff", "#ff6666"], "minValue": 0, "maxValue": 100}, + "legendItems": [], + "metadata": [], + "links": [], + "showTacticRowBackground": False, + "tacticRowBackground": "#dddddd", + "selectTechniquesAcrossTactics": True, + "selectSubtechniquesWithParent": False, + "selectVisibleTechniques": False, +} + + +def load_attack_data(filepath=None): + """Load ATT&CK STIX bundle from file or download.""" + if filepath and os.path.exists(filepath): + with open(filepath, "r", encoding="utf-8") as f: + return json.load(f) + if HAS_REQUESTS: + print("[*] Downloading ATT&CK Enterprise data...") + resp = requests.get(ATTACK_ENTERPRISE_URL, timeout=60) + resp.raise_for_status() + return resp.json() + return None + + +def extract_groups(bundle): + """Extract 
intrusion-set (APT group) objects from STIX bundle.""" + groups = {} + for obj in bundle.get("objects", []): + if obj.get("type") == "intrusion-set": + name = obj.get("name", "Unknown") + aliases = obj.get("aliases", []) + ext_refs = obj.get("external_references", []) + attack_id = "" + for ref in ext_refs: + if ref.get("source_name") == "mitre-attack": + attack_id = ref.get("external_id", "") + break + groups[obj["id"]] = { + "name": name, "id": attack_id, "aliases": aliases, + "description": obj.get("description", "")[:200], + } + return groups + + +def extract_techniques(bundle): + """Extract attack-pattern (technique) objects from STIX bundle.""" + techniques = {} + for obj in bundle.get("objects", []): + if obj.get("type") == "attack-pattern" and not obj.get("revoked", False): + ext_refs = obj.get("external_references", []) + attack_id = "" + for ref in ext_refs: + if ref.get("source_name") == "mitre-attack": + attack_id = ref.get("external_id", "") + break + if attack_id: + tactics = [p["phase_name"] for p in obj.get("kill_chain_phases", [])] + techniques[obj["id"]] = { + "id": attack_id, "name": obj.get("name", ""), + "tactics": tactics, "platforms": obj.get("x_mitre_platforms", []), + } + return techniques + + +def map_group_techniques(bundle, group_stix_id, techniques): + """Map techniques used by a specific group via relationship objects.""" + group_techniques = [] + for obj in bundle.get("objects", []): + if (obj.get("type") == "relationship" and + obj.get("relationship_type") == "uses" and + obj.get("source_ref") == group_stix_id and + obj.get("target_ref", "").startswith("attack-pattern--")): + tech_id = obj["target_ref"] + if tech_id in techniques: + group_techniques.append(techniques[tech_id]) + return group_techniques + + +def build_navigator_layer(group_name, group_techniques, color="#ff6666", score=100): + """Build ATT&CK Navigator JSON layer for a group's techniques.""" + layer = json.loads(json.dumps(NAVIGATOR_LAYER_TEMPLATE)) + layer["name"] = 
f"{group_name} - TTPs" + layer["description"] = f"ATT&CK techniques attributed to {group_name}" + for tech in group_techniques: + entry = { + "techniqueID": tech["id"], + "tactic": tech["tactics"][0] if tech["tactics"] else "", + "color": color, + "comment": f"Used by {group_name}", + "enabled": True, + "metadata": [], + "links": [], + "showSubtechniques": False, + "score": score, + } + layer["techniques"].append(entry) + return layer + + +def detection_gap_analysis(group_techniques, detection_rules): + """Compare group TTPs against existing detection rules to find gaps.""" + covered = set() + for rule in detection_rules: + tech_id = rule.get("technique_id", "") + if tech_id: + covered.add(tech_id) + gaps = [] + for tech in group_techniques: + if tech["id"] not in covered: + gaps.append({ + "technique_id": tech["id"], + "technique_name": tech["name"], + "tactics": tech["tactics"], + "status": "NO DETECTION", + }) + coverage_pct = (len(covered & {t["id"] for t in group_techniques}) / + len(group_techniques) * 100) if group_techniques else 0 + return gaps, round(coverage_pct, 1) + + +def tactic_heatmap(group_techniques): + """Generate tactic-level heatmap showing technique distribution.""" + tactic_counts = Counter() + for tech in group_techniques: + for tactic in tech["tactics"]: + tactic_counts[tactic] += 1 + return dict(tactic_counts.most_common()) + + +def compare_groups(group_a_techs, group_b_techs): + """Compare two groups' technique sets for overlap analysis.""" + set_a = {t["id"] for t in group_a_techs} + set_b = {t["id"] for t in group_b_techs} + overlap = set_a & set_b + only_a = set_a - set_b + only_b = set_b - set_a + jaccard = len(overlap) / len(set_a | set_b) if (set_a | set_b) else 0 + return { + "overlap_count": len(overlap), "overlap_ids": sorted(overlap), + "only_group_a": len(only_a), "only_group_b": len(only_b), + "jaccard_similarity": round(jaccard, 4), + } + + +def save_layer(layer, output_path): + """Save Navigator layer to JSON file.""" + with 
open(output_path, "w", encoding="utf-8") as f: + json.dump(layer, f, indent=2) + print(f"[+] Layer saved: {output_path}") + + +if __name__ == "__main__": + print("=" * 60) + print("APT Group Analysis Agent - MITRE ATT&CK Navigator") + print("TTP mapping, detection gap analysis, group comparison") + print("=" * 60) + + group_name = sys.argv[1] if len(sys.argv) > 1 else None + attack_file = sys.argv[2] if len(sys.argv) > 2 else None + + bundle = load_attack_data(attack_file) + if not bundle: + print("\n[!] Cannot load ATT&CK data. Provide STIX bundle path or install requests.") + print("[DEMO] Usage:") + print(" python agent.py APT29 enterprise-attack.json") + print(" python agent.py APT28 # downloads from GitHub") + sys.exit(1) + + groups = extract_groups(bundle) + techniques = extract_techniques(bundle) + print(f"[*] Loaded {len(groups)} groups, {len(techniques)} techniques") + + if not group_name: + print("\n--- Available APT Groups (sample) ---") + for gid, g in list(groups.items())[:20]: + print(f" {g['id']:8s} {g['name']:30s} aliases={g['aliases'][:3]}") + sys.exit(0) + + target_group = None + for gid, g in groups.items(): + if (g["name"].lower() == group_name.lower() or + g["id"].lower() == group_name.lower() or + group_name.lower() in [a.lower() for a in g["aliases"]]): + target_group = (gid, g) + break + + if not target_group: + print(f"[!] 
Group '{group_name}' not found") + sys.exit(1) + + gid, ginfo = target_group + print(f"\n[*] Group: {ginfo['name']} ({ginfo['id']})") + print(f" Aliases: {', '.join(ginfo['aliases'][:5])}") + + group_techs = map_group_techniques(bundle, gid, techniques) + print(f" Techniques: {len(group_techs)}") + + heatmap = tactic_heatmap(group_techs) + print("\n--- Tactic Heatmap ---") + for tactic, count in heatmap.items(): + bar = "#" * count + print(f" {tactic:35s} {count:3d} {bar}") + + layer = build_navigator_layer(ginfo["name"], group_techs) + out_file = f"{ginfo['name'].replace(' ', '_')}_layer.json" + save_layer(layer, out_file) + + sample_rules = [{"technique_id": t["id"]} for t in group_techs[:len(group_techs)//2]] + gaps, coverage = detection_gap_analysis(group_techs, sample_rules) + print(f"\n--- Detection Gap Analysis (demo: {coverage}% coverage) ---") + for gap in gaps[:10]: + print(f" [GAP] {gap['technique_id']:12s} {gap['technique_name']}") diff --git a/personas/_shared/skills/analyzing-azure-activity-logs-for-threats/LICENSE b/personas/_shared/skills/analyzing-azure-activity-logs-for-threats/LICENSE new file mode 100644 index 0000000..d885118 --- /dev/null +++ b/personas/_shared/skills/analyzing-azure-activity-logs-for-threats/LICENSE @@ -0,0 +1,201 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to the Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by the Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding any notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. Please do not remove or change + the license header comment from a contributed file except when + necessary. 
+ + Copyright 2026 mukul975 + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/personas/_shared/skills/analyzing-azure-activity-logs-for-threats/SKILL.md b/personas/_shared/skills/analyzing-azure-activity-logs-for-threats/SKILL.md new file mode 100644 index 0000000..e578391 --- /dev/null +++ b/personas/_shared/skills/analyzing-azure-activity-logs-for-threats/SKILL.md @@ -0,0 +1,78 @@ +--- +name: analyzing-azure-activity-logs-for-threats +description: 'Queries Azure Monitor activity logs and sign-in logs via azure-monitor-query to detect suspicious administrative + operations, impossible travel, privilege escalation, and resource modifications. Builds KQL queries for threat hunting in + Azure environments. Use when investigating suspicious Azure tenant activity or building cloud SIEM detections. 
+ + ' +domain: cybersecurity +subdomain: security-operations +tags: +- analyzing +- azure +- activity +- logs +version: '1.0' +author: mahipal +license: Apache-2.0 +nist_csf: +- DE.CM-01 +- RS.MA-01 +- GV.OV-01 +- DE.AE-02 +--- + +# Analyzing Azure Activity Logs for Threats + + +## When to Use + +- When investigating security incidents that require analyzing azure activity logs for threats +- When building detection rules or threat hunting queries for this domain +- When SOC analysts need structured procedures for this analysis type +- When validating security monitoring coverage for related attack techniques + +## Prerequisites + +- Familiarity with security operations concepts and tools +- Access to a test or lab environment for safe execution +- Python 3.8+ with required dependencies installed +- Appropriate authorization for any testing activities + +## Instructions + +Use azure-monitor-query to execute KQL queries against Azure Log Analytics workspaces, +detecting suspicious admin operations and sign-in anomalies. + +```python +from azure.identity import DefaultAzureCredential +from azure.monitor.query import LogsQueryClient +from datetime import timedelta + +credential = DefaultAzureCredential() +client = LogsQueryClient(credential) + +response = client.query_workspace( + workspace_id="WORKSPACE_ID", + query="AzureActivity | where OperationNameValue has 'MICROSOFT.AUTHORIZATION/ROLEASSIGNMENTS/WRITE' | take 10", + timespan=timedelta(hours=24), +) +``` + +Key detection queries: +1. Role assignment changes (privilege escalation) +2. Resource group and subscription modifications +3. Key vault secret access from new IPs +4. Network security group rule changes +5. 
Conditional access policy modifications + +## Examples + +```python +# Detect new Global Admin role assignments +query = ''' +AuditLogs +| where OperationName == "Add member to role" +| where TargetResources[0].modifiedProperties[0].newValue has "Global Administrator" +''' +``` diff --git a/personas/_shared/skills/analyzing-azure-activity-logs-for-threats/references/api-reference.md b/personas/_shared/skills/analyzing-azure-activity-logs-for-threats/references/api-reference.md new file mode 100644 index 0000000..69df600 --- /dev/null +++ b/personas/_shared/skills/analyzing-azure-activity-logs-for-threats/references/api-reference.md @@ -0,0 +1,54 @@ +# API Reference: Analyzing Azure Activity Logs for Threats + +## azure-monitor-query + +```python +from azure.identity import DefaultAzureCredential +from azure.monitor.query import LogsQueryClient, LogsQueryStatus +from datetime import timedelta + +credential = DefaultAzureCredential() +client = LogsQueryClient(credential) + +response = client.query_workspace( + workspace_id="WORKSPACE_ID", + query="AzureActivity | take 10", + timespan=timedelta(hours=24), +) +if response.status == LogsQueryStatus.SUCCESS: + for table in response.tables: + columns = [col.name for col in table.columns] + for row in table.rows: + print(dict(zip(columns, row))) +``` + +## Key Azure Log Tables + +| Table | Content | +|-------|---------| +| `AzureActivity` | Control plane operations (ARM) | +| `SigninLogs` | Azure AD sign-in events | +| `AuditLogs` | Azure AD audit trail | +| `AzureDiagnostics` | Resource diagnostics (Key Vault, NSG) | +| `SecurityAlert` | Defender for Cloud alerts | + +## Threat Detection KQL Patterns + +```kql +// Privilege escalation +AzureActivity | where OperationNameValue has "ROLEASSIGNMENTS/WRITE" + +// Impossible travel +SigninLogs | where ResultType == 0 +| extend Distance = geo_distance_2points(...) 
+ +// Mass deletion +AzureActivity | where OperationNameValue endswith "/DELETE" +| summarize count() by Caller, bin(TimeGenerated, 1h) +``` + +### References + +- azure-monitor-query: https://pypi.org/project/azure-monitor-query/ +- KQL reference: https://learn.microsoft.com/en-us/azure/data-explorer/kusto/query/ +- Azure Activity Log schema: https://learn.microsoft.com/en-us/azure/azure-monitor/essentials/activity-log-schema diff --git a/personas/_shared/skills/analyzing-azure-activity-logs-for-threats/scripts/agent.py b/personas/_shared/skills/analyzing-azure-activity-logs-for-threats/scripts/agent.py new file mode 100644 index 0000000..6777f7b --- /dev/null +++ b/personas/_shared/skills/analyzing-azure-activity-logs-for-threats/scripts/agent.py @@ -0,0 +1,178 @@ +#!/usr/bin/env python3 +"""Agent for analyzing Azure activity logs for threat detection.""" + +import os +import json +import argparse +from datetime import datetime, timedelta + +from azure.identity import DefaultAzureCredential, ClientSecretCredential +from azure.monitor.query import LogsQueryClient, LogsQueryStatus + + +def get_credential(tenant_id=None, client_id=None, client_secret=None): + """Get Azure credential.""" + if client_id and client_secret and tenant_id: + return ClientSecretCredential(tenant_id, client_id, client_secret) + return DefaultAzureCredential() + + +def run_kql(credential, workspace_id, query, hours=24): + """Execute KQL query against Log Analytics workspace.""" + client = LogsQueryClient(credential) + response = client.query_workspace( + workspace_id, query, timespan=timedelta(hours=hours) + ) + rows = [] + if response.status == LogsQueryStatus.SUCCESS: + for table in response.tables: + columns = [col.name for col in table.columns] + for row in table.rows: + rows.append(dict(zip(columns, row))) + return rows + + +def detect_privilege_escalation(credential, workspace_id): + """Detect role assignment changes indicating privilege escalation.""" + query = """ + AzureActivity + | 
where OperationNameValue has_any ( + "MICROSOFT.AUTHORIZATION/ROLEASSIGNMENTS/WRITE", + "MICROSOFT.AUTHORIZATION/ROLEDEFINITIONS/WRITE" + ) + | where ActivityStatusValue == "Success" + | project TimeGenerated, Caller, CallerIpAddress, + OperationNameValue, ResourceGroup, Properties_d + | order by TimeGenerated desc + """ + return run_kql(credential, workspace_id, query) + + +def detect_nsg_changes(credential, workspace_id): + """Detect Network Security Group rule modifications.""" + query = """ + AzureActivity + | where OperationNameValue has_any ( + "MICROSOFT.NETWORK/NETWORKSECURITYGROUPS/SECURITYRULES/WRITE", + "MICROSOFT.NETWORK/NETWORKSECURITYGROUPS/SECURITYRULES/DELETE" + ) + | where ActivityStatusValue == "Success" + | project TimeGenerated, Caller, CallerIpAddress, + OperationNameValue, ResourceGroup + | order by TimeGenerated desc + """ + return run_kql(credential, workspace_id, query) + + +def detect_keyvault_access(credential, workspace_id): + """Detect Key Vault secret access from unusual sources.""" + query = """ + AzureDiagnostics + | where ResourceProvider == "MICROSOFT.KEYVAULT" + | where OperationName in ("SecretGet", "SecretList", "SecretSet") + | summarize AccessCount = count(), DistinctIPs = dcount(CallerIPAddress), + IPList = make_set(CallerIPAddress, 10) + by identity_claim_upn_s, OperationName, Resource + | where DistinctIPs > 2 or AccessCount > 50 + | order by AccessCount desc + """ + return run_kql(credential, workspace_id, query) + + +def detect_impossible_travel(credential, workspace_id): + """Detect sign-ins from geographically distant locations in short time.""" + query = """ + SigninLogs + | where ResultType == 0 + | project TimeGenerated, UserPrincipalName, IPAddress, + Lat = toreal(LocationDetails.geoCoordinates.latitude), + Lon = toreal(LocationDetails.geoCoordinates.longitude) + | sort by UserPrincipalName asc, TimeGenerated asc + | extend PrevLat = prev(Lat), PrevLon = prev(Lon), + PrevTime = prev(TimeGenerated), PrevUser = 
prev(UserPrincipalName) + | where UserPrincipalName == PrevUser + | extend TimeDiffMin = datetime_diff('minute', TimeGenerated, PrevTime) + | where TimeDiffMin < 60 and TimeDiffMin > 0 + | extend DistKm = geo_distance_2points(Lon, Lat, PrevLon, PrevLat) / 1000 + | where DistKm > 500 + | project TimeGenerated, UserPrincipalName, IPAddress, DistKm, TimeDiffMin + """ + return run_kql(credential, workspace_id, query) + + +def detect_resource_deletion(credential, workspace_id): + """Detect mass resource deletion events.""" + query = """ + AzureActivity + | where OperationNameValue endswith "/DELETE" + | where ActivityStatusValue == "Success" + | summarize DeleteCount = count(), Resources = make_set(Resource, 20) + by Caller, bin(TimeGenerated, 1h) + | where DeleteCount > 10 + | order by DeleteCount desc + """ + return run_kql(credential, workspace_id, query) + + +def detect_conditional_access_changes(credential, workspace_id): + """Detect modifications to Conditional Access policies.""" + query = """ + AuditLogs + | where OperationName has_any ( + "Update conditional access policy", + "Delete conditional access policy" + ) + | project TimeGenerated, InitiatedBy, OperationName, + TargetResources, Result + | order by TimeGenerated desc + """ + return run_kql(credential, workspace_id, query) + + +def main(): + parser = argparse.ArgumentParser(description="Azure Activity Log Threat Detection Agent") + parser.add_argument("--workspace-id", default=os.getenv("AZURE_WORKSPACE_ID")) + parser.add_argument("--tenant-id", default=os.getenv("AZURE_TENANT_ID")) + parser.add_argument("--client-id", default=os.getenv("AZURE_CLIENT_ID")) + parser.add_argument("--client-secret", default=os.getenv("AZURE_CLIENT_SECRET")) + parser.add_argument("--output", default="azure_threat_report.json") + parser.add_argument("--action", choices=[ + "privesc", "nsg", "keyvault", "travel", "deletion", "full_hunt" + ], default="full_hunt") + args = parser.parse_args() + + cred = 
get_credential(args.tenant_id, args.client_id, args.client_secret) + report = {"generated_at": datetime.utcnow().isoformat(), "findings": {}} + + if args.action in ("privesc", "full_hunt"): + results = detect_privilege_escalation(cred, args.workspace_id) + report["findings"]["privilege_escalation"] = results + print(f"[+] Privilege escalation events: {len(results)}") + + if args.action in ("nsg", "full_hunt"): + results = detect_nsg_changes(cred, args.workspace_id) + report["findings"]["nsg_changes"] = results + print(f"[+] NSG changes: {len(results)}") + + if args.action in ("keyvault", "full_hunt"): + results = detect_keyvault_access(cred, args.workspace_id) + report["findings"]["keyvault_anomalies"] = results + print(f"[+] Key Vault anomalies: {len(results)}") + + if args.action in ("travel", "full_hunt"): + results = detect_impossible_travel(cred, args.workspace_id) + report["findings"]["impossible_travel"] = results + print(f"[+] Impossible travel: {len(results)}") + + if args.action in ("deletion", "full_hunt"): + results = detect_resource_deletion(cred, args.workspace_id) + report["findings"]["mass_deletion"] = results + print(f"[+] Mass deletion events: {len(results)}") + + with open(args.output, "w") as f: + json.dump(report, f, indent=2, default=str) + print(f"[+] Report saved to {args.output}") + + +if __name__ == "__main__": + main() diff --git a/personas/_shared/skills/analyzing-bootkit-and-rootkit-samples/LICENSE b/personas/_shared/skills/analyzing-bootkit-and-rootkit-samples/LICENSE new file mode 100644 index 0000000..d885118 --- /dev/null +++ b/personas/_shared/skills/analyzing-bootkit-and-rootkit-samples/LICENSE @@ -0,0 +1,201 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to the Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by the Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding any notices that do not + pertain to any part of the Derivative Works, in at least one + of the 
following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. Please do not remove or change + the license header comment from a contributed file except when + necessary. + + Copyright 2026 mukul975 + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/personas/_shared/skills/analyzing-bootkit-and-rootkit-samples/SKILL.md b/personas/_shared/skills/analyzing-bootkit-and-rootkit-samples/SKILL.md new file mode 100644 index 0000000..d8f381e --- /dev/null +++ b/personas/_shared/skills/analyzing-bootkit-and-rootkit-samples/SKILL.md @@ -0,0 +1,354 @@ +--- +name: analyzing-bootkit-and-rootkit-samples +description: 'Analyzes bootkit and advanced rootkit malware that infects the Master Boot Record (MBR), Volume Boot Record + (VBR), or UEFI firmware to gain persistence below the operating system. Covers boot sector analysis, UEFI module inspection, + and anti-rootkit detection techniques. Activates for requests involving bootkit analysis, MBR malware investigation, UEFI + persistence analysis, or pre-OS malware detection. 
+ + ' +domain: cybersecurity +subdomain: malware-analysis +tags: +- malware +- bootkit +- rootkit +- UEFI +- MBR-analysis +version: 1.0.0 +author: mahipal +license: Apache-2.0 +nist_csf: +- DE.AE-02 +- RS.AN-03 +- ID.RA-01 +- DE.CM-01 +--- + +# Analyzing Bootkit and Rootkit Samples + +## When to Use + +- A system shows signs of compromise that persist through OS reinstallation +- Antivirus and EDR are unable to detect malware despite clear evidence of compromise +- UEFI Secure Boot has been disabled or shows integrity violations +- Memory forensics reveals rootkit behavior (hidden processes, hooked system calls) +- Investigating nation-state level threats known to deploy bootkits (APT28, APT41, Equation Group) + +**Do not use** for standard user-mode malware; bootkits and rootkits operate at a fundamentally different level requiring specialized analysis techniques. + +## Prerequisites + +- Disk imaging tools (dd, FTK Imager) for acquiring MBR/VBR sectors +- UEFITool for UEFI firmware volume analysis and module extraction +- chipsec for hardware-level firmware security assessment +- Ghidra with x86 real-mode and 16-bit support for MBR code analysis +- Volatility 3 for kernel-level rootkit artifact detection +- Bootable Linux live USB for offline system analysis + +## Workflow + +### Step 1: Acquire Boot Sectors and Firmware + +Extract MBR, VBR, and UEFI firmware for offline analysis: + +```bash +# Acquire MBR (first 512 bytes of disk) +dd if=/dev/sda of=mbr.bin bs=512 count=1 + +# Acquire first track (usually contains bootkit code beyond MBR) +dd if=/dev/sda of=first_track.bin bs=512 count=63 + +# Acquire VBR (Volume Boot Record - first sector of partition) +dd if=/dev/sda1 of=vbr.bin bs=512 count=1 + +# Acquire UEFI System Partition +mkdir /mnt/efi +mount /dev/sda1 /mnt/efi +cp -r /mnt/efi/EFI /analysis/efi_backup/ + +# Dump UEFI firmware (requires chipsec or flashrom) +# Using chipsec: +python chipsec_util.py spi dump firmware.rom + +# Using flashrom: +flashrom -p 
internal -r firmware.rom + +# Verify firmware dump integrity +sha256sum firmware.rom +``` + +### Step 2: Analyze MBR/VBR for Bootkit Code + +Examine boot sector code for malicious modifications: + +```bash +# Disassemble MBR code (16-bit real mode) +ndisasm -b16 mbr.bin > mbr_disasm.txt + +# Compare MBR with known-good Windows MBR +# Standard Windows MBR begins with: EB 5A 90 (JMP 0x5C, NOP) +# Standard Windows 10 MBR: 33 C0 8E D0 BC 00 7C (XOR AX,AX; MOV SS,AX; MOV SP,7C00h) + +python3 << 'PYEOF' +with open("mbr.bin", "rb") as f: + mbr = f.read() + +# Check MBR signature (bytes 510-511 should be 0x55AA) +if mbr[510:512] == b'\x55\xAA': + print("[*] Valid MBR signature (0x55AA)") +else: + print("[!] Invalid MBR signature") + +# Check for known bootkit signatures +bootkit_sigs = { + b'\xE8\x00\x00\x5E\x81\xEE': "TDL4/Alureon bootkit", + b'\xFA\x33\xC0\x8E\xD0\xBC\x00\x7C\x8B\xF4\x50\x07': "Standard Windows MBR (clean)", + b'\xEB\x5A\x90\x4E\x54\x46\x53': "Standard NTFS VBR (clean)", +} + +for sig, name in bootkit_sigs.items(): + if sig in mbr: + print(f"[{'!' 
if 'clean' not in name else '*'}] Signature match: {name}") + +# Check partition table entries +print("\nPartition Table:") +for i in range(4): + offset = 446 + (i * 16) + entry = mbr[offset:offset+16] + if entry != b'\x00' * 16: + boot_flag = "Active" if entry[0] == 0x80 else "Inactive" + part_type = entry[4] + start_lba = int.from_bytes(entry[8:12], 'little') + size_lba = int.from_bytes(entry[12:16], 'little') + print(f" Partition {i+1}: Type=0x{part_type:02X} {boot_flag} Start=LBA {start_lba} Size={size_lba} sectors") +PYEOF +``` + +### Step 3: Analyze UEFI Firmware for Implants + +Inspect UEFI firmware volumes for unauthorized modules: + +```bash +# Extract UEFI firmware components with UEFITool +# GUI: Open firmware.rom -> Inspect firmware volumes +# CLI: +UEFIExtract firmware.rom all + +# List all DXE drivers (most common target for UEFI implants) +find firmware.rom.dump -name "*.efi" -exec file {} \; + +# Compare against known-good firmware module list +# Each UEFI module has a GUID - compare against vendor baseline + +# Verify Secure Boot configuration +python chipsec_main.py -m common.secureboot.variables + +# Check SPI flash write protection +python chipsec_main.py -m common.bios_wp + +# Check for known UEFI malware patterns +yara -r uefi_malware.yar firmware.rom +``` + +``` +Known UEFI Bootkit Detection Points: +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +LoJax (APT28): + - Modified SPI flash + - Added DXE driver that drops agent to Windows + - Persists through OS reinstall and disk replacement + +BlackLotus: + - Exploits CVE-2022-21894 to bypass Secure Boot + - Modifies EFI System Partition bootloader + - Installs kernel driver during boot + +CosmicStrand: + - Modifies CORE_DXE firmware module + - Hooks kernel initialization during boot + - Drops shellcode into Windows kernel memory + +MoonBounce: + - SPI flash implant in CORE_DXE module + - Modified GetVariable() function + - Deploys user-mode implant through boot chain + +ESPecter: + - Modifies Windows Boot 
Manager on ESP + - Patches winload.efi to disable DSE + - Loads unsigned kernel driver +``` + +### Step 4: Detect Kernel-Level Rootkit Behavior + +Analyze the running system for rootkit artifacts: + +```bash +# Memory forensics for rootkit detection +# SSDT hook detection +vol3 -f memory.dmp windows.ssdt | grep -v "ntoskrnl\|win32k" + +# Hidden processes (DKOM) +vol3 -f memory.dmp windows.psscan > psscan.txt +vol3 -f memory.dmp windows.pslist > pslist.txt +# Diff to find hidden processes + +# Kernel callback registration (rootkits register callbacks for filtering) +vol3 -f memory.dmp windows.callbacks + +# Driver analysis +vol3 -f memory.dmp windows.driverscan +vol3 -f memory.dmp windows.modules + +# Check for unsigned drivers +vol3 -f memory.dmp windows.driverscan | while read line; do + driver_path=$(echo "$line" | awk '{print $NF}') + if [ -f "$driver_path" ]; then + sigcheck -nobanner "$driver_path" 2>/dev/null | grep "Unsigned" + fi +done + +# IDT hook detection +vol3 -f memory.dmp windows.idt +``` + +### Step 5: Boot Process Integrity Verification + +Verify the integrity of the entire boot chain: + +```bash +# Verify Windows Boot Manager signature +sigcheck -a C:\Windows\Boot\EFI\bootmgfw.efi + +# Verify winload.efi +sigcheck -a C:\Windows\System32\winload.efi + +# Verify ntoskrnl.exe +sigcheck -a C:\Windows\System32\ntoskrnl.exe + +# Check Measured Boot logs (if TPM is available) +# Windows: BCDEdit /enum firmware +bcdedit /enum firmware + +# Verify Secure Boot state +Confirm-SecureBootUEFI # PowerShell cmdlet + +# Check boot configuration for tampering +bcdedit /v + +# Look for boot configuration changes +# testsigning: should be No +# nointegritychecks: should be No +# debug: should be No +bcdedit | findstr /i "testsigning nointegritychecks debug" +``` + +### Step 6: Document Bootkit/Rootkit Analysis + +Compile comprehensive analysis findings: + +``` +Analysis should document: +- Boot sector (MBR/VBR) integrity status with hex comparison +- UEFI firmware 
module inventory and integrity verification +- Secure Boot status and any bypass mechanisms detected +- Kernel-level hooks (SSDT, IDT, IRP, inline) identified +- Hidden processes, drivers, and files discovered +- Persistence mechanism (SPI flash, ESP, MBR, kernel driver) +- Boot chain integrity verification results +- Attribution to known bootkit families if possible +- Remediation steps (reflash firmware, rebuild MBR, replace hardware) +``` + +## Key Concepts + +| Term | Definition | +|------|------------| +| **Bootkit** | Malware that infects the boot process (MBR, VBR, UEFI) to execute before the operating system loads, gaining persistent low-level control | +| **MBR (Master Boot Record)** | First 512 bytes of a disk containing bootstrap code and partition table; MBR bootkits replace this code with malicious loaders | +| **UEFI (Unified Extensible Firmware Interface)** | Modern firmware interface replacing BIOS; UEFI bootkits implant malicious modules in firmware volumes or modify the ESP | +| **Secure Boot** | UEFI security feature verifying digital signatures of boot components; bootkits like BlackLotus exploit vulnerabilities to bypass it | +| **SPI Flash** | Flash memory chip storing UEFI firmware; advanced bootkits like LoJax and MoonBounce modify SPI flash for firmware-level persistence | +| **DKOM (Direct Kernel Object Manipulation)** | Rootkit technique modifying kernel structures to hide processes, files, and network connections without hooking functions | +| **Driver Signature Enforcement (DSE)** | Windows security feature requiring kernel drivers to be digitally signed; bootkits disable DSE during boot to load unsigned rootkit drivers | + +## Tools & Systems + +- **UEFITool**: Open-source UEFI firmware image editor and parser for inspecting firmware volumes, drivers, and modules +- **chipsec**: Intel hardware security assessment framework for verifying SPI flash protection, Secure Boot, and UEFI configuration +- **Volatility**: Memory forensics 
framework with SSDT, IDT, callback, and driver analysis plugins for kernel rootkit detection +- **GMER**: Windows rootkit detection tool scanning for SSDT hooks, IDT hooks, hidden processes, and modified kernel modules +- **Bootkits Analyzer**: Specialized tool for analyzing MBR/VBR code including disassembly and comparison against known-good baselines + +## Common Scenarios + +### Scenario: Investigating Persistent Compromise Surviving OS Reinstallation + +**Context**: An organization reimaged a compromised workstation, but the same C2 beaconing resumed within hours. Standard disk forensics finds no malware. UEFI bootkit is suspected. + +**Approach**: +1. Boot from a Linux live USB to avoid executing any compromised OS components +2. Dump the SPI flash firmware using chipsec or flashrom for offline analysis +3. Dump the MBR and VBR sectors with dd for boot sector analysis +4. Copy the EFI System Partition for bootloader integrity verification +5. Open the SPI dump in UEFITool and compare module GUIDs against vendor-provided firmware +6. Look for additional or modified DXE drivers that should not be present +7. Analyze any suspicious modules with Ghidra (x86_64 UEFI module format) +8. 
Verify Secure Boot configuration and check for exploit-based bypasses + +**Pitfalls**: +- Analyzing the system while the compromised OS is running (rootkit may hide from live analysis) +- Not checking SPI flash (only analyzing disk-based boot components misses firmware-level implants) +- Assuming Secure Boot prevents all bootkits (known bypasses exist, e.g., CVE-2022-21894) +- Not preserving the original firmware dump before reflashing (critical evidence for attribution) + +## Output Format + +``` +BOOTKIT / ROOTKIT ANALYSIS REPORT +==================================== +System: Dell OptiPlex 7090 (UEFI, TPM 2.0) +Firmware Version: 1.15.0 (Dell) +Secure Boot: ENABLED (but bypassed) +Capture Method: Linux Live USB + chipsec SPI dump + +MBR/VBR ANALYSIS +MBR Signature: Valid (0x55AA) +MBR Code: MATCHES standard Windows 10 MBR (clean) +VBR Code: MATCHES standard NTFS VBR (clean) + +UEFI FIRMWARE ANALYSIS +Total Modules: 287 +Vendor Expected: 285 +Extra Modules: 2 UNAUTHORIZED + [!] DXE Driver GUID: {ABCD1234-...} "SmmAccessDxe_mod" (MODIFIED) + Original Size: 12,288 bytes + Current Size: 45,056 bytes (32KB ADDED) + Entropy: 7.82 (HIGH - encrypted payload) + + [!] DXE Driver GUID: {EFGH5678-...} "UefiPayloadDxe" (NEW - not in vendor firmware) + Size: 28,672 bytes + Function: Drops persistence agent during boot + +BOOT CHAIN INTEGRITY +bootmgfw.efi: MODIFIED (hash mismatch, Secure Boot bypass via CVE-2022-21894) +winload.efi: MODIFIED (DSE disabled at load time) +ntoskrnl.exe: CLEAN (but unsigned driver loaded after boot) + +KERNEL ROOTKIT COMPONENTS +Driver: C:\Windows\System32\drivers\null_mod.sys (unsigned, hidden) +SSDT Hooks: 3 (NtQuerySystemInformation, NtQueryDirectoryFile, NtDeviceIoControlFile) +Hidden Processes: 2 (PID 6784: beacon.exe, PID 6812: keylog.exe) +Hidden Files: C:\Windows\System32\drivers\null_mod.sys + +ATTRIBUTION +Family: BlackLotus variant +Confidence: HIGH (CVE-2022-21894 exploit, ESP modification pattern matches) + +REMEDIATION +1. 
Reflash SPI firmware with clean vendor image via hardware programmer +2. Rebuild EFI System Partition from clean Windows installation media +3. Reinstall OS from verified media +4. Enable all firmware write protections +5. Update firmware to latest version (patches CVE-2022-21894) +``` diff --git a/personas/_shared/skills/analyzing-bootkit-and-rootkit-samples/references/api-reference.md b/personas/_shared/skills/analyzing-bootkit-and-rootkit-samples/references/api-reference.md new file mode 100644 index 0000000..4bd7ceb --- /dev/null +++ b/personas/_shared/skills/analyzing-bootkit-and-rootkit-samples/references/api-reference.md @@ -0,0 +1,97 @@ +# API Reference: Bootkit and Rootkit Analysis Tools + +## dd - Boot Sector Extraction + +### Syntax +```bash +dd if=/dev/sda of=mbr.bin bs=512 count=1 # MBR +dd if=/dev/sda of=first_track.bin bs=512 count=63 # First track +dd if=/dev/sda1 of=vbr.bin bs=512 count=1 # VBR +``` + +## ndisasm - 16-bit Disassembly + +### Syntax +```bash +ndisasm -b16 mbr.bin > mbr_disasm.txt +ndisasm -b16 -o 0x7C00 mbr.bin # Set origin to MBR load address +``` + +### Key Flags +| Flag | Description | +|------|-------------| +| `-b16` | 16-bit real-mode disassembly | +| `-b32` | 32-bit protected-mode | +| `-o` | Origin address offset | + +## UEFITool - Firmware Analysis + +### CLI Syntax +```bash +UEFIExtract firmware.rom all # Extract all modules +UEFIExtract firmware.rom body # Extract specific module body +``` + +### Output +Extracts firmware volumes into a directory tree with each DXE driver, PEI module, and option ROM as separate files identified by GUID. 
+ +## chipsec - Hardware Security Assessment + +### Syntax +```bash +python chipsec_main.py -m common.secureboot.variables # Check Secure Boot +python chipsec_main.py -m common.bios_wp # SPI write protection +python chipsec_main.py -m common.spi_lock # SPI lock status +python chipsec_util.py spi dump firmware.rom # Dump SPI flash +``` + +### Key Modules +| Module | Purpose | +|--------|---------| +| `common.secureboot.variables` | Verify Secure Boot configuration | +| `common.bios_wp` | Check BIOS write protection | +| `common.spi_lock` | Verify SPI flash lock bits | +| `common.smm` | SMM protection verification | + +## Volatility 3 - Rootkit Detection Plugins + +### Syntax +```bash +vol3 -f memory.dmp +``` + +### Rootkit Detection Plugins +| Plugin | Purpose | +|--------|---------| +| `windows.ssdt` | System Service Descriptor Table hooks | +| `windows.callbacks` | Kernel callback registrations | +| `windows.driverscan` | Scan for driver objects | +| `windows.modules` | List loaded kernel modules | +| `windows.psscan` | Pool-tag scan for processes (finds hidden) | +| `windows.pslist` | Active process list (DKOM-affected) | +| `windows.idt` | Interrupt Descriptor Table hooks | + +### Output Format +``` +Offset Order Module Section Owner +------- ----- ------ ------- ----- +0x... 0 ntoskrnl.exe .text ntoskrnl.exe +0x... 
73 UNKNOWN - rootkit.sys ← suspicious +``` + +## flashrom - SPI Flash Dumping + +### Syntax +```bash +flashrom -p internal -r firmware.rom # Read/dump +flashrom -p internal -w clean.rom # Write/reflash +flashrom -p internal --verify clean.rom # Verify flash contents +``` + +## YARA - Firmware Pattern Scanning + +### Syntax +```bash +yara -r uefi_malware.yar firmware.rom +yara -s -r rules.yar firmware.rom # Show matching strings +``` diff --git a/personas/_shared/skills/analyzing-bootkit-and-rootkit-samples/scripts/agent.py b/personas/_shared/skills/analyzing-bootkit-and-rootkit-samples/scripts/agent.py new file mode 100644 index 0000000..1f0b36a --- /dev/null +++ b/personas/_shared/skills/analyzing-bootkit-and-rootkit-samples/scripts/agent.py @@ -0,0 +1,199 @@ +#!/usr/bin/env python3 +"""Bootkit and rootkit analysis agent for MBR/VBR/UEFI inspection and rootkit detection.""" + +import struct +import hashlib +import os +import sys +import subprocess +import math +from collections import Counter + + +def read_mbr(disk_path_or_file): + """Read and parse the first 512 bytes (MBR) from a disk image or device.""" + with open(disk_path_or_file, "rb") as f: + mbr = f.read(512) + return mbr + + +def validate_mbr_signature(mbr_data): + """Check the MBR boot signature at bytes 510-511 (should be 0x55AA).""" + sig = mbr_data[510:512] + valid = sig == b"\x55\xAA" + return valid, sig.hex() + + +def parse_partition_table(mbr_data): + """Parse the four 16-byte partition table entries starting at offset 446.""" + partitions = [] + for i in range(4): + offset = 446 + (i * 16) + entry = mbr_data[offset:offset + 16] + if entry == b"\x00" * 16: + continue + boot_flag = entry[0] + part_type = entry[4] + start_lba = struct.unpack_from(" 6.5, + "suspicious_patterns": suspicious_patterns, + } + + +def run_volatility_rootkit_scan(memory_dump, plugin): + """Run a Volatility 3 plugin for rootkit detection via subprocess.""" + result = subprocess.run( + ["vol3", "-f", memory_dump, plugin], + 
capture_output=True, text=True, + timeout=120, + ) + return result.stdout, result.stderr, result.returncode + + +def detect_kernel_rootkit(memory_dump): + """Run multiple Volatility plugins to detect kernel-level rootkit artifacts.""" + plugins = [ + "windows.ssdt", + "windows.callbacks", + "windows.driverscan", + "windows.modules", + "windows.psscan", + "windows.pslist", + ] + results = {} + for plugin in plugins: + stdout, stderr, rc = run_volatility_rootkit_scan(memory_dump, plugin) + results[plugin] = {"output": stdout, "error": stderr, "return_code": rc} + return results + + +def compare_process_lists(pslist_output, psscan_output): + """Compare pslist and psscan output to find hidden processes (DKOM).""" + pslist_pids = set() + psscan_pids = set() + for line in pslist_output.splitlines(): + parts = line.split() + if len(parts) >= 2 and parts[1].isdigit(): + pslist_pids.add(int(parts[1])) + for line in psscan_output.splitlines(): + parts = line.split() + if len(parts) >= 2 and parts[1].isdigit(): + psscan_pids.add(int(parts[1])) + hidden = psscan_pids - pslist_pids + return hidden + + +if __name__ == "__main__": + print("=" * 60) + print("Bootkit & Rootkit Analysis Agent") + print("MBR/VBR inspection, UEFI firmware analysis, rootkit detection") + print("=" * 60) + + # Demo with a sample MBR file if available + demo_mbr = "mbr.bin" + if len(sys.argv) > 1: + demo_mbr = sys.argv[1] + + if os.path.exists(demo_mbr): + print(f"\n[*] Analyzing: {demo_mbr}") + mbr = read_mbr(demo_mbr) + valid, sig_hex = validate_mbr_signature(mbr) + print(f"[*] MBR Signature: 0x{sig_hex.upper()} ({'Valid' if valid else 'INVALID'})") + + partitions = parse_partition_table(mbr) + print(f"[*] Partition entries: {len(partitions)}") + for p in partitions: + active = "Active" if p["active"] else "Inactive" + print(f" Part {p['index']}: Type={p['type_id']} {active} " + f"Start=LBA {p['start_lba']} Size={p['size_mb']} MB") + + sigs = scan_bootkit_signatures(mbr) + for s in sigs: + tag = "[*]" 
if s["clean"] else "[!]" + print(f"{tag} Signature match: {s['signature']} at offset {s['offset']}") + + analysis = analyze_boot_code(mbr) + print(f"[*] Boot code entropy: {analysis['entropy']}" + f" ({'HIGH - possible encryption' if analysis['high_entropy'] else 'Normal'})") + print(f"[*] Boot code SHA-256: {analysis['sha256']}") + for pat in analysis["suspicious_patterns"]: + print(f"[!] {pat}") + else: + print(f"\n[DEMO] No MBR file provided. Usage: {sys.argv[0]} ") + print("[DEMO] Provide a 512-byte MBR dump or disk device for analysis.") + print("\n[*] Supported analysis:") + print(" - MBR/VBR signature validation and bootkit detection") + print(" - Partition table parsing and anomaly detection") + print(" - Boot code entropy and pattern analysis") + print(" - Volatility-based kernel rootkit detection (SSDT, callbacks, DKOM)") + print(" - UEFI firmware module inspection via chipsec subprocess") diff --git a/personas/_shared/skills/analyzing-browser-forensics-with-hindsight/LICENSE b/personas/_shared/skills/analyzing-browser-forensics-with-hindsight/LICENSE new file mode 100644 index 0000000..d885118 --- /dev/null +++ b/personas/_shared/skills/analyzing-browser-forensics-with-hindsight/LICENSE @@ -0,0 +1,201 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to the Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by the Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding any notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. Please do not remove or change + the license header comment from a contributed file except when + necessary. 
+ + Copyright 2026 mukul975 + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/personas/_shared/skills/analyzing-browser-forensics-with-hindsight/SKILL.md b/personas/_shared/skills/analyzing-browser-forensics-with-hindsight/SKILL.md new file mode 100644 index 0000000..b6a9400 --- /dev/null +++ b/personas/_shared/skills/analyzing-browser-forensics-with-hindsight/SKILL.md @@ -0,0 +1,296 @@ +--- +name: analyzing-browser-forensics-with-hindsight +description: Analyze Chromium-based browser artifacts using Hindsight to extract browsing history, downloads, cookies, cached + content, autofill data, saved passwords, and browser extensions from Chrome, Edge, Brave, and Opera for forensic investigation. +domain: cybersecurity +subdomain: digital-forensics +tags: +- browser-forensics +- hindsight +- chrome-forensics +- chromium +- edge +- browsing-history +- cookies +- downloads +- cache +- web-artifacts +version: '1.0' +author: mahipal +license: Apache-2.0 +nist_csf: +- RS.AN-01 +- RS.AN-03 +- DE.AE-02 +- RS.MA-01 +--- + +# Analyzing Browser Forensics with Hindsight + +## Overview + +Hindsight is an open-source browser forensics tool designed to parse artifacts from Google Chrome and other Chromium-based browsers (Microsoft Edge, Brave, Opera, Vivaldi). It extracts and correlates data from multiple browser database files to create a unified timeline of web activity. 
Hindsight can parse URLs, download history, cache records, bookmarks, autofill records, saved passwords, preferences, browser extensions, HTTP cookies, Local Storage (HTML5 cookies), login data, and session/tab information. The tool produces chronological timelines in multiple output formats (XLSX, JSON, SQLite) that enable investigators to reconstruct user web activity for incident response, insider threat investigations, and criminal cases. + + +## When to Use + +- When investigating security incidents that require analyzing browser forensics with hindsight +- When building detection rules or threat hunting queries for this domain +- When SOC analysts need structured procedures for this analysis type +- When validating security monitoring coverage for related attack techniques + +## Prerequisites + +- Python 3.8+ with Hindsight installed (`pip install pyhindsight`) +- Access to browser profile directories from forensic image +- Browser profile data (not encrypted with OS-level encryption) +- Timeline Explorer or spreadsheet application for analysis + +## Browser Profile Locations + +| Browser | Windows Profile Path | +|---------|---------------------| +| Chrome | %LOCALAPPDATA%\Google\Chrome\User Data\Default\ | +| Edge | %LOCALAPPDATA%\Microsoft\Edge\User Data\Default\ | +| Brave | %LOCALAPPDATA%\BraveSoftware\Brave-Browser\User Data\Default\ | +| Opera | %APPDATA%\Opera Software\Opera Stable\ | +| Vivaldi | %LOCALAPPDATA%\Vivaldi\User Data\Default\ | +| Chrome (macOS) | ~/Library/Application Support/Google/Chrome/Default/ | +| Chrome (Linux) | ~/.config/google-chrome/Default/ | + +## Key Artifact Files + +| File | Contents | +|------|----------| +| History | URL visits, downloads, keyword searches | +| Cookies | HTTP cookies with domain, expiry, values | +| Web Data | Autofill entries, saved credit cards | +| Login Data | Saved usernames/passwords (encrypted) | +| Bookmarks | JSON bookmark tree | +| Preferences | Browser configuration and extensions | +| Local 
Storage/ | HTML5 Local Storage per domain | +| Session Storage/ | Session-specific storage per domain | +| Network Action Predictor | Previously typed URLs | +| Shortcuts | Omnibox shortcuts and predictions | +| Top Sites | Frequently visited sites | + +## Running Hindsight + +### Command Line + +```bash +# Basic analysis of a Chrome profile +hindsight.exe -i "C:\Evidence\Users\suspect\AppData\Local\Google\Chrome\User Data\Default" -o C:\Output\chrome_analysis + +# Specify browser type +hindsight.exe -i "/path/to/profile" -o /output/analysis -b Chrome + +# JSON output format +hindsight.exe -i "C:\Evidence\Chrome\Default" -o C:\Output\chrome --format jsonl + +# With cache parsing (slower but more complete) +hindsight.exe -i "C:\Evidence\Chrome\Default" -o C:\Output\chrome --cache +``` + +### Web UI + +```bash +# Start Hindsight web interface +hindsight_gui.exe +# Navigate to http://localhost:8080 +# Upload or point to browser profile directory +# Configure output format and analysis options +# Generate and download report +``` + +## Artifact Analysis Details + +### URL History and Visits + +```sql +-- Chrome History database schema (key tables) +-- urls table: id, url, title, visit_count, typed_count, last_visit_time +-- visits table: id, url, visit_time, from_visit, transition, segment_id + +-- Timestamps are Chrome/WebKit format: microseconds since 1601-01-01 +-- Convert: datetime((visit_time/1000000)-11644473600, 'unixepoch') +``` + +### Download History + +```sql +-- downloads table: id, current_path, target_path, start_time, end_time, +-- received_bytes, total_bytes, state, danger_type, interrupt_reason, +-- url, referrer, tab_url, mime_type, original_mime_type +``` + +### Cookie Analysis + +```sql +-- cookies table: creation_utc, host_key, name, value, encrypted_value, +-- path, expires_utc, is_secure, is_httponly, last_access_utc, +-- has_expires, is_persistent, priority, samesite +``` + +## Python Analysis Script + +```python +import sqlite3 +import os 
+import json +import sys +from datetime import datetime, timedelta + + +CHROME_EPOCH = datetime(1601, 1, 1) + + +def chrome_time_to_datetime(chrome_ts: int): + """Convert Chrome timestamp to datetime.""" + if chrome_ts == 0: + return None + try: + return CHROME_EPOCH + timedelta(microseconds=chrome_ts) + except (OverflowError, OSError): + return None + + +def analyze_chrome_history(profile_path: str, output_dir: str) -> dict: + """Analyze Chrome History database for forensic evidence.""" + history_db = os.path.join(profile_path, "History") + if not os.path.exists(history_db): + return {"error": "History database not found"} + + os.makedirs(output_dir, exist_ok=True) + conn = sqlite3.connect(f"file:{history_db}?mode=ro", uri=True) + + # URL visits with timestamps + cursor = conn.cursor() + cursor.execute(""" + SELECT u.url, u.title, v.visit_time, u.visit_count, + v.transition & 0xFF as transition_type + FROM visits v JOIN urls u ON v.url = u.id + ORDER BY v.visit_time DESC LIMIT 5000 + """) + visits = [{ + "url": r[0], "title": r[1], + "visit_time": str(chrome_time_to_datetime(r[2])), + "total_visits": r[3], "transition": r[4] + } for r in cursor.fetchall()] + + # Downloads + cursor.execute(""" + SELECT target_path, tab_url, start_time, end_time, + received_bytes, total_bytes, mime_type, state + FROM downloads ORDER BY start_time DESC LIMIT 1000 + """) + downloads = [{ + "path": r[0], "source_url": r[1], + "start_time": str(chrome_time_to_datetime(r[2])), + "end_time": str(chrome_time_to_datetime(r[3])), + "received_bytes": r[4], "total_bytes": r[5], + "mime_type": r[6], "state": r[7] + } for r in cursor.fetchall()] + + # Keyword searches + cursor.execute(""" + SELECT k.term, u.url, k.url_id + FROM keyword_search_terms k JOIN urls u ON k.url_id = u.id + ORDER BY u.last_visit_time DESC LIMIT 1000 + """) + searches = [{"term": r[0], "url": r[1]} for r in cursor.fetchall()] + + conn.close() + + report = { + "analysis_timestamp": datetime.now().isoformat(), + 
"profile_path": profile_path, + "total_visits": len(visits), + "total_downloads": len(downloads), + "total_searches": len(searches), + "visits": visits, + "downloads": downloads, + "searches": searches + } + + report_path = os.path.join(output_dir, "browser_forensics.json") + with open(report_path, "w") as f: + json.dump(report, f, indent=2) + + return report + + +def main(): + if len(sys.argv) < 3: + print("Usage: python process.py ") + sys.exit(1) + analyze_chrome_history(sys.argv[1], sys.argv[2]) + + +if __name__ == "__main__": + main() +``` + +## References + +- Hindsight GitHub: https://github.com/obsidianforensics/hindsight +- Chrome Forensics Guide: https://allenace.medium.com/hindsight-chrome-forensics-made-simple-425db99fa5ed +- Browser Forensics Tools: https://www.cyberforensicacademy.com/blog/browser-forensics-tools-how-to-extract-user-activity +- Chromium Source (History): https://source.chromium.org/chromium/chromium/src/+/main:components/history/ + +## Example Output + +```text +$ python hindsight.py -i /evidence/chrome-profile -o /analysis/hindsight_output + +Hindsight v2024.01 - Chrome/Chromium Browser Forensic Analysis +================================================================ + +Profile: /evidence/chrome-profile (Chrome 120.0.6099.130) +OS: Windows 10 + +[+] Parsing History database... + URL records: 12,456 + Download records: 234 + Search terms: 567 + +[+] Parsing Cookies database... + Cookie records: 8,923 + Encrypted cookies: 6,712 + +[+] Parsing Web Data (Autofill)... + Autofill entries: 1,234 + Credit card entries: 2 (encrypted) + +[+] Parsing Login Data... + Saved credentials: 45 (encrypted) + +[+] Parsing Bookmarks... + Bookmark entries: 189 + +--- Browsing History (Last 10 Entries) --- +Timestamp (UTC) | URL | Title | Visit Count +2024-01-15 14:32:05.123 | https://mail.corporate.com/inbox | Corporate Mail | 45 +2024-01-15 14:33:12.456 | https://drive.google.com/file/d/1aBcDe... 
| Q4_Financial_Report.xlsx | 1 +2024-01-15 14:35:44.789 | https://mega.nz/folder/xYz123 | MEGA - Secure Cloud | 3 +2024-01-15 14:36:01.234 | https://mega.nz/folder/xYz123#upload | MEGA - Upload | 8 +2024-01-15 14:42:15.567 | https://pastebin.com/raw/kL9mN2pQ | Pastebin (raw) | 1 +2024-01-15 15:01:33.890 | https://192.168.1.50:8443/admin | Admin Panel | 12 +2024-01-15 15:15:22.111 | https://transfer.sh/upload | transfer.sh | 2 +2024-01-15 15:30:45.222 | https://vpn-gateway.corporate.com | VPN Login | 5 +2024-01-15 16:00:00.333 | https://whatismyipaddress.com | What Is My IP | 1 +2024-01-15 16:05:12.444 | https://protonmail.com/inbox | ProtonMail | 3 + +--- Downloads (Suspicious) --- +Timestamp (UTC) | Filename | URL Source | Size +2024-01-15 14:33:15.000 | Q4_Financial_Report.xlsm | https://phish-domain.com/docs/report | 245 KB +2024-01-15 14:34:02.000 | update_client.exe | https://cdn.evil-updates.com/client.exe | 1.2 MB + +--- Cookies (Session Tokens) --- +Domain | Name | Expires | Secure | HttpOnly +.corporate.com | SESSION_ID | 2024-01-16 14:32 | Yes | Yes +.mega.nz | session | Session | Yes | Yes +.protonmail.com | AUTH-TOKEN | 2024-02-15 00:00 | Yes | Yes + +Report saved to: /analysis/hindsight_output/Hindsight_Report.xlsx +``` diff --git a/personas/_shared/skills/analyzing-browser-forensics-with-hindsight/assets/template.md b/personas/_shared/skills/analyzing-browser-forensics-with-hindsight/assets/template.md new file mode 100644 index 0000000..e00e520 --- /dev/null +++ b/personas/_shared/skills/analyzing-browser-forensics-with-hindsight/assets/template.md @@ -0,0 +1,22 @@ +# Browser Forensics Report +## Case Info +| Field | Value | +|-------|-------| +| Case Number | | +| Browser | | +| Profile Path | | +## Activity Summary +| Metric | Count | +|--------|-------| +| URL Visits | | +| Downloads | | +| Saved Passwords | | +| Cookies | | +## Notable URLs +| Timestamp | URL | Title | +|-----------|-----|-------| +| | | | +## Downloads +| Timestamp | File | 
Source URL | Size |
+|-----------|------|-----------|------|
+| | | | |
diff --git a/personas/_shared/skills/analyzing-browser-forensics-with-hindsight/references/api-reference.md b/personas/_shared/skills/analyzing-browser-forensics-with-hindsight/references/api-reference.md
new file mode 100644
index 0000000..85fe5f6
--- /dev/null
+++ b/personas/_shared/skills/analyzing-browser-forensics-with-hindsight/references/api-reference.md
@@ -0,0 +1,92 @@
+# API Reference: Browser Forensics with Hindsight
+
+## Hindsight CLI
+
+### Syntax
+```bash
+hindsight.py -i <profile_path>                 # Analyze Chrome profile
+hindsight.py -i <profile_path> -o <output_dir> # Save results
+hindsight.py -i <profile_path> -f xlsx         # Export as Excel
+hindsight.py -i <profile_path> -f sqlite       # Export as SQLite
+hindsight.py -i <profile_path> -b <browser>    # Specify browser type
+```
+
+### Browser Types
+| Flag | Browser |
+|------|---------|
+| `Chrome` | Google Chrome |
+| `Edge` | Microsoft Edge (Chromium) |
+| `Brave` | Brave Browser |
+| `Opera` | Opera (Chromium) |
+
+### Output Artifacts
+| Table | Description |
+|-------|-------------|
+| `urls` | Browsing history with visit counts |
+| `downloads` | File downloads with source URLs |
+| `cookies` | Cookie values, domains, expiry |
+| `autofill` | Form autofill entries |
+| `bookmarks` | Saved bookmarks |
+| `preferences` | Browser configuration |
+| `local_storage` | Site local storage data |
+| `login_data` | Saved credential metadata |
+| `extensions` | Installed extensions with permissions |
+
+## Chrome SQLite Databases
+
+### History Database
+```sql
+-- Browsing history
+SELECT u.url, u.title, v.visit_time, v.transition
+FROM visits v JOIN urls u ON v.url = u.id
+ORDER BY v.visit_time DESC;
+
+-- Downloads
+SELECT target_path, tab_url, total_bytes, start_time, danger_type, mime_type
+FROM downloads ORDER BY start_time DESC;
+```
+
+### Cookies Database
+```sql
+SELECT host_key, name, value, creation_utc, expires_utc, is_secure, is_httponly
+FROM cookies ORDER BY creation_utc DESC;
+```
+
+### Web Data Database (Autofill)
+```sql
+SELECT name, value, count, date_created, date_last_used +FROM autofill ORDER BY date_last_used DESC; +``` + +## Chrome Timestamp Conversion + +### Format +Microseconds since January 1, 1601 (Windows FILETIME base) + +### Python Conversion +```python +import datetime +def chrome_to_datetime(chrome_time): + epoch = datetime.datetime(1601, 1, 1) + return epoch + datetime.timedelta(microseconds=chrome_time) +``` + +## Browser Profile Paths + +| OS | Browser | Default Path | +|----|---------|-------------| +| Windows | Chrome | `%LOCALAPPDATA%\Google\Chrome\User Data\Default` | +| Windows | Edge | `%LOCALAPPDATA%\Microsoft\Edge\User Data\Default` | +| Linux | Chrome | `~/.config/google-chrome/Default` | +| macOS | Chrome | `~/Library/Application Support/Google/Chrome/Default` | + +## Transition Types (visit_transition & 0xFF) +| Value | Type | Description | +|-------|------|-------------| +| 0 | LINK | Clicked a link | +| 1 | TYPED | Typed URL in address bar | +| 2 | AUTO_BOOKMARK | Via bookmark | +| 3 | AUTO_SUBFRAME | Subframe navigation | +| 5 | GENERATED | Generated (e.g., search) | +| 7 | FORM_SUBMIT | Form submission | +| 8 | RELOAD | Page reload | diff --git a/personas/_shared/skills/analyzing-browser-forensics-with-hindsight/references/standards.md b/personas/_shared/skills/analyzing-browser-forensics-with-hindsight/references/standards.md new file mode 100644 index 0000000..c54b46c --- /dev/null +++ b/personas/_shared/skills/analyzing-browser-forensics-with-hindsight/references/standards.md @@ -0,0 +1,15 @@ +# Standards - Browser Forensics with Hindsight +## Tools +- Hindsight: https://github.com/obsidianforensics/hindsight +- DB Browser for SQLite: Chrome database inspection +- ChromeCacheView (NirSoft): Cache analysis +## Browser Databases +- History: URL visits, downloads, keyword searches +- Cookies: HTTP cookies per domain +- Web Data: Autofill, credit cards +- Login Data: Saved credentials (encrypted) +- Bookmarks: JSON bookmark tree +## Timestamp 
Formats +- Chrome/WebKit: microseconds since 1601-01-01 UTC +- Firefox/Mozilla: microseconds since Unix epoch +- Safari/Mac: seconds since 2001-01-01 UTC diff --git a/personas/_shared/skills/analyzing-browser-forensics-with-hindsight/references/workflows.md b/personas/_shared/skills/analyzing-browser-forensics-with-hindsight/references/workflows.md new file mode 100644 index 0000000..793f637 --- /dev/null +++ b/personas/_shared/skills/analyzing-browser-forensics-with-hindsight/references/workflows.md @@ -0,0 +1,19 @@ +# Workflows - Browser Forensics +## Workflow: Chrome Profile Analysis +``` +Locate browser profile directory + | +Run Hindsight against profile path + | +Review generated timeline (XLSX/JSON) + | +Analyze URL history for suspicious sites + | +Check downloads for malware/exfiltrated data + | +Review cookies for session hijacking evidence + | +Examine autofill and saved credentials + | +Correlate browser activity with system timeline +``` diff --git a/personas/_shared/skills/analyzing-browser-forensics-with-hindsight/scripts/agent.py b/personas/_shared/skills/analyzing-browser-forensics-with-hindsight/scripts/agent.py new file mode 100644 index 0000000..b3efbe7 --- /dev/null +++ b/personas/_shared/skills/analyzing-browser-forensics-with-hindsight/scripts/agent.py @@ -0,0 +1,254 @@ +#!/usr/bin/env python3 +"""Browser forensics analysis agent using Hindsight concepts. + +Parses Chromium-based browser artifacts (Chrome, Edge, Brave) including +history, downloads, cookies, autofill, and extensions from SQLite databases. 
+""" + +import os +import sys +import json +import sqlite3 +import datetime + + +def chrome_time_to_datetime(chrome_time): + """Convert Chrome timestamp (microseconds since 1601-01-01) to datetime.""" + if not chrome_time or chrome_time == 0: + return None + try: + epoch = datetime.datetime(1601, 1, 1) + delta = datetime.timedelta(microseconds=chrome_time) + return (epoch + delta).isoformat() + "Z" + except (OverflowError, OSError): + return None + + +def find_browser_profiles(base_path=None): + """Locate Chromium-based browser profile directories.""" + if base_path and os.path.isdir(base_path): + return [base_path] + profiles = [] + home = os.path.expanduser("~") + candidates = [ + os.path.join(home, "AppData", "Local", "Google", "Chrome", "User Data", "Default"), + os.path.join(home, "AppData", "Local", "Microsoft", "Edge", "User Data", "Default"), + os.path.join(home, "AppData", "Local", "BraveSoftware", "Brave-Browser", "User Data", "Default"), + os.path.join(home, ".config", "google-chrome", "Default"), + os.path.join(home, ".config", "chromium", "Default"), + os.path.join(home, ".config", "microsoft-edge", "Default"), + ] + for path in candidates: + if os.path.isdir(path): + profiles.append(path) + return profiles + + +def parse_history(profile_path): + """Parse browsing history from History SQLite database.""" + db_path = os.path.join(profile_path, "History") + if not os.path.exists(db_path): + return [] + entries = [] + try: + conn = sqlite3.connect(f"file:{db_path}?mode=ro", uri=True) + cursor = conn.cursor() + cursor.execute(""" + SELECT u.url, u.title, v.visit_time, v.transition, u.visit_count + FROM visits v JOIN urls u ON v.url = u.id + ORDER BY v.visit_time DESC LIMIT 5000 + """) + for url, title, visit_time, transition, count in cursor.fetchall(): + entries.append({ + "url": url, "title": title or "", + "visit_time": chrome_time_to_datetime(visit_time), + "transition": transition & 0xFF, + "visit_count": count, + }) + conn.close() + except 
sqlite3.Error as e: + entries.append({"error": str(e)}) + return entries + + +def parse_downloads(profile_path): + """Parse download history from History database.""" + db_path = os.path.join(profile_path, "History") + if not os.path.exists(db_path): + return [] + downloads = [] + try: + conn = sqlite3.connect(f"file:{db_path}?mode=ro", uri=True) + cursor = conn.cursor() + cursor.execute(""" + SELECT target_path, tab_url, total_bytes, start_time, end_time, + danger_type, interrupt_reason, mime_type + FROM downloads ORDER BY start_time DESC LIMIT 1000 + """) + for row in cursor.fetchall(): + downloads.append({ + "target_path": row[0], "source_url": row[1], + "total_bytes": row[2], + "start_time": chrome_time_to_datetime(row[3]), + "end_time": chrome_time_to_datetime(row[4]), + "danger_type": row[5], "interrupt_reason": row[6], + "mime_type": row[7], + }) + conn.close() + except sqlite3.Error as e: + downloads.append({"error": str(e)}) + return downloads + + +def parse_cookies(profile_path): + """Parse cookies from Cookies database.""" + db_path = os.path.join(profile_path, "Cookies") + if not os.path.exists(db_path): + db_path = os.path.join(profile_path, "Network", "Cookies") + if not os.path.exists(db_path): + return [] + cookies = [] + try: + conn = sqlite3.connect(f"file:{db_path}?mode=ro", uri=True) + cursor = conn.cursor() + cursor.execute(""" + SELECT host_key, name, path, creation_utc, expires_utc, + is_secure, is_httponly, samesite + FROM cookies ORDER BY creation_utc DESC LIMIT 2000 + """) + for row in cursor.fetchall(): + cookies.append({ + "host": row[0], "name": row[1], "path": row[2], + "created": chrome_time_to_datetime(row[3]), + "expires": chrome_time_to_datetime(row[4]), + "secure": bool(row[5]), "httponly": bool(row[6]), + "samesite": row[7], + }) + conn.close() + except sqlite3.Error as e: + cookies.append({"error": str(e)}) + return cookies + + +def parse_autofill(profile_path): + """Parse autofill data from Web Data database.""" + db_path = 
os.path.join(profile_path, "Web Data") + if not os.path.exists(db_path): + return [] + entries = [] + try: + conn = sqlite3.connect(f"file:{db_path}?mode=ro", uri=True) + cursor = conn.cursor() + cursor.execute(""" + SELECT name, value, count, date_created, date_last_used + FROM autofill ORDER BY date_last_used DESC LIMIT 500 + """) + for row in cursor.fetchall(): + entries.append({ + "field_name": row[0], "value": row[1][:50] + "..." if len(row[1]) > 50 else row[1], + "usage_count": row[2], + "created": chrome_time_to_datetime(row[3] * 1000000 if row[3] else 0), + "last_used": chrome_time_to_datetime(row[4] * 1000000 if row[4] else 0), + }) + conn.close() + except sqlite3.Error as e: + entries.append({"error": str(e)}) + return entries + + +def parse_extensions(profile_path): + """Parse installed browser extensions.""" + ext_dir = os.path.join(profile_path, "Extensions") + extensions = [] + if not os.path.isdir(ext_dir): + return extensions + for ext_id in os.listdir(ext_dir): + ext_path = os.path.join(ext_dir, ext_id) + if os.path.isdir(ext_path): + versions = sorted(os.listdir(ext_path)) + manifest_path = os.path.join(ext_path, versions[-1], "manifest.json") if versions else None + name = ext_id + if manifest_path and os.path.exists(manifest_path): + try: + with open(manifest_path, "r", encoding="utf-8") as f: + manifest = json.load(f) + name = manifest.get("name", ext_id) + extensions.append({ + "id": ext_id, "name": name, + "version": manifest.get("version", "?"), + "permissions": manifest.get("permissions", [])[:10], + }) + except (json.JSONDecodeError, IOError): + extensions.append({"id": ext_id, "name": name, "version": "unknown"}) + return extensions + + +def detect_suspicious_activity(history, downloads): + """Flag suspicious browsing and download patterns.""" + findings = [] + suspicious_domains = ["pastebin.com", "ngrok.io", "raw.githubusercontent.com", + "transfer.sh", "file.io", "temp.sh", "anonfiles.com"] + for entry in history: + url = 
entry.get("url", "").lower() + for domain in suspicious_domains: + if domain in url: + findings.append({ + "type": "suspicious_url", "url": entry["url"], + "domain": domain, "time": entry.get("visit_time"), + }) + dangerous_mimes = ["application/x-msdownload", "application/x-msdos-program", + "application/x-executable", "application/vnd.ms-excel.sheet.macroEnabled"] + for dl in downloads: + if dl.get("danger_type", 0) > 0: + findings.append({ + "type": "dangerous_download", "path": dl.get("target_path"), + "source": dl.get("source_url"), "danger_type": dl.get("danger_type"), + }) + if dl.get("mime_type", "") in dangerous_mimes: + findings.append({ + "type": "suspicious_mime", "mime": dl.get("mime_type"), + "path": dl.get("target_path"), + }) + return findings + + +if __name__ == "__main__": + print("=" * 60) + print("Browser Forensics Analysis Agent") + print("Chromium history, downloads, cookies, extensions") + print("=" * 60) + + target = sys.argv[1] if len(sys.argv) > 1 else None + profiles = find_browser_profiles(target) + + if not profiles: + print("\n[!] No browser profiles found.") + print("[DEMO] Usage: python agent.py ") + print(" e.g. 
python agent.py ~/AppData/Local/Google/Chrome/User\\ Data/Default") + sys.exit(0) + + for profile in profiles: + print(f"\n[*] Profile: {profile}") + + history = parse_history(profile) + print(f" History entries: {len(history)}") + for h in history[:5]: + print(f" {h.get('visit_time', '?')} | {h.get('title', '')[:50]} | {h.get('url', '')[:60]}") + + downloads = parse_downloads(profile) + print(f" Downloads: {len(downloads)}") + for d in downloads[:5]: + print(f" {d.get('start_time', '?')} | {d.get('mime_type', '?')} | {os.path.basename(d.get('target_path', ''))}") + + cookies = parse_cookies(profile) + print(f" Cookies: {len(cookies)}") + + extensions = parse_extensions(profile) + print(f" Extensions: {len(extensions)}") + for ext in extensions[:5]: + print(f" {ext.get('name', '?')} v{ext.get('version', '?')} [{ext.get('id', '')[:20]}]") + + findings = detect_suspicious_activity(history, downloads) + print(f"\n --- Suspicious Activity: {len(findings)} findings ---") + for f in findings[:10]: + print(f" [{f['type']}] {f.get('url', f.get('path', ''))}") diff --git a/personas/_shared/skills/analyzing-browser-forensics-with-hindsight/scripts/process.py b/personas/_shared/skills/analyzing-browser-forensics-with-hindsight/scripts/process.py new file mode 100644 index 0000000..b204e2e --- /dev/null +++ b/personas/_shared/skills/analyzing-browser-forensics-with-hindsight/scripts/process.py @@ -0,0 +1,31 @@ +#!/usr/bin/env python3 +"""Browser Forensics Analyzer - Parses Chrome History SQLite for investigation.""" +import sqlite3, json, os, sys +from datetime import datetime, timedelta + +CHROME_EPOCH = datetime(1601, 1, 1) + +def chrome_ts(ts): + if not ts: return None + try: return str(CHROME_EPOCH + timedelta(microseconds=ts)) + except: return None + +def analyze_chrome(profile: str, output_dir: str) -> str: + os.makedirs(output_dir, exist_ok=True) + history_db = os.path.join(profile, "History") + conn = sqlite3.connect(f"file:{history_db}?mode=ro", uri=True) + c = 
conn.cursor()
+    c.execute("SELECT u.url, u.title, v.visit_time, u.visit_count FROM visits v JOIN urls u ON v.url=u.id ORDER BY v.visit_time DESC LIMIT 2000")
+    visits = [{"url": r[0], "title": r[1], "time": chrome_ts(r[2]), "count": r[3]} for r in c.fetchall()]
+    c.execute("SELECT target_path, tab_url, start_time, total_bytes, mime_type FROM downloads ORDER BY start_time DESC LIMIT 500")
+    downloads = [{"path": r[0], "url": r[1], "time": chrome_ts(r[2]), "size": r[3], "mime": r[4]} for r in c.fetchall()]
+    conn.close()
+    report = {"visits": len(visits), "downloads": len(downloads), "visit_data": visits, "download_data": downloads}
+    out = os.path.join(output_dir, "browser_forensics.json")
+    with open(out, "w") as f: json.dump(report, f, indent=2)
+    print(f"[*] Visits: {len(visits)}, Downloads: {len(downloads)}")
+    return out
+
+if __name__ == "__main__":
+    if len(sys.argv) < 3: print("Usage: process.py <profile_path> <output_dir>"); sys.exit(1)
+    analyze_chrome(sys.argv[1], sys.argv[2])
diff --git a/personas/_shared/skills/analyzing-campaign-attribution-evidence/LICENSE b/personas/_shared/skills/analyzing-campaign-attribution-evidence/LICENSE
new file mode 100644
index 0000000..d885118
--- /dev/null
+++ b/personas/_shared/skills/analyzing-campaign-attribution-evidence/LICENSE
@@ -0,0 +1,201 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to the Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by the Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding any notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. Please do not remove or change + the license header comment from a contributed file except when + necessary. 
+
+   Copyright 2026 mukul975
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/personas/_shared/skills/analyzing-campaign-attribution-evidence/SKILL.md b/personas/_shared/skills/analyzing-campaign-attribution-evidence/SKILL.md
new file mode 100644
index 0000000..da9756c
--- /dev/null
+++ b/personas/_shared/skills/analyzing-campaign-attribution-evidence/SKILL.md
@@ -0,0 +1,237 @@
+---
+name: analyzing-campaign-attribution-evidence
+description: Campaign attribution analysis involves systematically evaluating evidence to determine which threat actor or
+  group is responsible for a cyber operation. This skill covers collecting and weighting attribution indicators
+domain: cybersecurity
+subdomain: threat-intelligence
+tags:
+- threat-intelligence
+- cti
+- ioc
+- mitre-attack
+- stix
+- attribution
+- campaign-analysis
+version: '1.0'
+author: mahipal
+license: Apache-2.0
+nist_csf:
+- ID.RA-01
+- ID.RA-05
+- DE.CM-01
+- DE.AE-02
+---
+# Analyzing Campaign Attribution Evidence
+
+## Overview
+
+Campaign attribution analysis involves systematically evaluating evidence to determine which threat actor or group is responsible for a cyber operation. This skill covers collecting and weighting attribution indicators using the Diamond Model and ACH (Analysis of Competing Hypotheses), analyzing infrastructure overlaps, TTP consistency, malware code similarities, operational timing patterns, and language artifacts to build confidence-weighted attribution assessments. 
+ + +## When to Use + +- When investigating security incidents that require analyzing campaign attribution evidence +- When building detection rules or threat hunting queries for this domain +- When SOC analysts need structured procedures for this analysis type +- When validating security monitoring coverage for related attack techniques + +## Prerequisites + +- Python 3.9+ with `attackcti`, `stix2`, `networkx` libraries +- Access to threat intelligence platforms (MISP, OpenCTI) +- Understanding of Diamond Model of Intrusion Analysis +- Familiarity with MITRE ATT&CK threat group profiles +- Knowledge of malware analysis and infrastructure tracking techniques + +## Key Concepts + +### Attribution Evidence Categories +1. **Infrastructure Overlap**: Shared C2 servers, domains, IP ranges, hosting providers +2. **TTP Consistency**: Matching ATT&CK techniques and sub-techniques across campaigns +3. **Malware Code Similarity**: Shared code bases, compilers, PDB paths, encryption routines +4. **Operational Patterns**: Timing (working hours, time zones), targeting patterns, operational tempo +5. **Language Artifacts**: Embedded strings, variable names, error messages in specific languages +6. **Victimology**: Target sector, geography, and organizational profile consistency + +### Confidence Levels +- **High Confidence**: Multiple independent evidence categories converge on same actor +- **Moderate Confidence**: Several evidence categories match, some ambiguity remains +- **Low Confidence**: Limited evidence, possible false flags or shared tooling + +### Analysis of Competing Hypotheses (ACH) +Structured analytical method that evaluates evidence against multiple competing hypotheses. Each piece of evidence is scored as consistent, inconsistent, or neutral with respect to each hypothesis. The hypothesis with the least inconsistent evidence is favored. 
+ +## Workflow + +### Step 1: Collect Attribution Evidence + +```python +from stix2 import MemoryStore, Filter +from collections import defaultdict + +class AttributionAnalyzer: + def __init__(self): + self.evidence = [] + self.hypotheses = {} + + def add_evidence(self, category, description, value, confidence): + self.evidence.append({ + "category": category, + "description": description, + "value": value, + "confidence": confidence, + "timestamp": None, + }) + + def add_hypothesis(self, actor_name, actor_id=""): + self.hypotheses[actor_name] = { + "actor_id": actor_id, + "consistent_evidence": [], + "inconsistent_evidence": [], + "neutral_evidence": [], + "score": 0, + } + + def evaluate_evidence(self, evidence_idx, actor_name, assessment): + """Assess evidence against a hypothesis: consistent/inconsistent/neutral.""" + if assessment == "consistent": + self.hypotheses[actor_name]["consistent_evidence"].append(evidence_idx) + self.hypotheses[actor_name]["score"] += self.evidence[evidence_idx]["confidence"] + elif assessment == "inconsistent": + self.hypotheses[actor_name]["inconsistent_evidence"].append(evidence_idx) + self.hypotheses[actor_name]["score"] -= self.evidence[evidence_idx]["confidence"] * 2 + else: + self.hypotheses[actor_name]["neutral_evidence"].append(evidence_idx) + + def rank_hypotheses(self): + """Rank hypotheses by attribution score.""" + ranked = sorted( + self.hypotheses.items(), + key=lambda x: x[1]["score"], + reverse=True, + ) + return [ + { + "actor": name, + "score": data["score"], + "consistent": len(data["consistent_evidence"]), + "inconsistent": len(data["inconsistent_evidence"]), + "confidence": self._score_to_confidence(data["score"]), + } + for name, data in ranked + ] + + def _score_to_confidence(self, score): + if score >= 80: + return "HIGH" + elif score >= 40: + return "MODERATE" + else: + return "LOW" +``` + +### Step 2: Infrastructure Overlap Analysis + +```python +def analyze_infrastructure_overlap(campaign_a_infra, 
campaign_b_infra): + """Compare infrastructure between two campaigns for attribution.""" + overlap = { + "shared_ips": set(campaign_a_infra.get("ips", [])).intersection( + campaign_b_infra.get("ips", []) + ), + "shared_domains": set(campaign_a_infra.get("domains", [])).intersection( + campaign_b_infra.get("domains", []) + ), + "shared_asns": set(campaign_a_infra.get("asns", [])).intersection( + campaign_b_infra.get("asns", []) + ), + "shared_registrars": set(campaign_a_infra.get("registrars", [])).intersection( + campaign_b_infra.get("registrars", []) + ), + } + + overlap_score = 0 + if overlap["shared_ips"]: + overlap_score += 30 + if overlap["shared_domains"]: + overlap_score += 25 + if overlap["shared_asns"]: + overlap_score += 15 + if overlap["shared_registrars"]: + overlap_score += 10 + + return { + "overlap": {k: list(v) for k, v in overlap.items()}, + "overlap_score": overlap_score, + "assessment": "STRONG" if overlap_score >= 40 else "MODERATE" if overlap_score >= 20 else "WEAK", + } +``` + +### Step 3: TTP Comparison Across Campaigns + +```python +from attackcti import attack_client + +def compare_campaign_ttps(campaign_techniques, known_actor_techniques): + """Compare campaign TTPs against known threat actor profiles.""" + campaign_set = set(campaign_techniques) + actor_set = set(known_actor_techniques) + + common = campaign_set.intersection(actor_set) + unique_campaign = campaign_set - actor_set + unique_actor = actor_set - campaign_set + + jaccard = len(common) / len(campaign_set.union(actor_set)) if campaign_set.union(actor_set) else 0 + + return { + "common_techniques": sorted(common), + "common_count": len(common), + "unique_to_campaign": sorted(unique_campaign), + "unique_to_actor": sorted(unique_actor), + "jaccard_similarity": round(jaccard, 3), + "overlap_percentage": round(len(common) / len(campaign_set) * 100, 1) if campaign_set else 0, + } +``` + +### Step 4: Generate Attribution Report + +```python +def generate_attribution_report(analyzer): + 
"""Generate structured attribution assessment report.""" + rankings = analyzer.rank_hypotheses() + + report = { + "assessment_date": "2026-02-23", + "total_evidence_items": len(analyzer.evidence), + "hypotheses_evaluated": len(analyzer.hypotheses), + "rankings": rankings, + "primary_attribution": rankings[0] if rankings else None, + "evidence_summary": [ + { + "index": i, + "category": e["category"], + "description": e["description"], + "confidence": e["confidence"], + } + for i, e in enumerate(analyzer.evidence) + ], + } + + return report +``` + +## Validation Criteria + +- Evidence collection covers all six attribution categories +- ACH matrix properly evaluates evidence against competing hypotheses +- Infrastructure overlap analysis identifies shared indicators +- TTP comparison uses ATT&CK technique IDs for precision +- Attribution confidence levels are properly justified +- Report includes alternative hypotheses and false flag considerations + +## References + +- [Diamond Model of Intrusion Analysis](https://www.activeresponse.org/wp-content/uploads/2013/07/diamond.pdf) +- [MITRE ATT&CK Groups](https://attack.mitre.org/groups/) +- [Analysis of Competing Hypotheses](https://www.cia.gov/static/9a5f1162fd0932c29e985f0159f56c07/Tradecraft-Primer-apr09.pdf) +- [Threat Attribution Framework](https://www.mandiant.com/resources/reports) diff --git a/personas/_shared/skills/analyzing-campaign-attribution-evidence/assets/template.md b/personas/_shared/skills/analyzing-campaign-attribution-evidence/assets/template.md new file mode 100644 index 0000000..c43da08 --- /dev/null +++ b/personas/_shared/skills/analyzing-campaign-attribution-evidence/assets/template.md @@ -0,0 +1,39 @@ +# Campaign Attribution Analysis Report Template + +## Report Metadata +| Field | Value | +|-------|-------| +| Report ID | CTI-YYYY-NNNN | +| Date | YYYY-MM-DD | +| Classification | TLP:AMBER | +| Analyst | [Name] | +| Confidence | High/Moderate/Low | + +## Executive Summary +[Brief overview of 
key findings and their significance] + +## Key Findings +1. [Finding 1 with supporting evidence] +2. [Finding 2 with supporting evidence] +3. [Finding 3 with supporting evidence] + +## Detailed Analysis +### Finding 1 +- **Evidence**: [Description of evidence] +- **Confidence**: High/Moderate/Low +- **MITRE ATT&CK**: [Relevant technique IDs] +- **Impact Assessment**: [Potential impact to organization] + +## Indicators of Compromise +| Type | Value | Context | Confidence | +|------|-------|---------|-----------| +| | | | | + +## Recommendations +1. **Immediate**: [Actions requiring immediate attention] +2. **Short-term**: [Actions within 1-2 weeks] +3. **Long-term**: [Strategic improvements] + +## References +- [Source 1] +- [Source 2] diff --git a/personas/_shared/skills/analyzing-campaign-attribution-evidence/references/api-reference.md b/personas/_shared/skills/analyzing-campaign-attribution-evidence/references/api-reference.md new file mode 100644 index 0000000..96ccc8a --- /dev/null +++ b/personas/_shared/skills/analyzing-campaign-attribution-evidence/references/api-reference.md @@ -0,0 +1,110 @@ +# API Reference: Campaign Attribution Evidence Analysis + +## Diamond Model of Intrusion Analysis + +### Four Core Features +| Feature | Description | Attribution Value | +|---------|-------------|-------------------| +| Adversary | Threat actor identity | Direct attribution | +| Capability | Malware, exploits, tools | Indirect - shared tooling | +| Infrastructure | C2, domains, IPs | Strong - operational overlap | +| Victim | Targets, sectors, regions | Contextual - targeting pattern | + +### Pivot Analysis +``` +Adversary ←→ Capability ←→ Infrastructure ←→ Victim + ↕ ↕ ↕ ↕ + (HUMINT) (Malware DB) (WHOIS/DNS) (Victimology) +``` + +## Analysis of Competing Hypotheses (ACH) + +### Matrix Format +``` +Evidence \ Hypothesis | APT28 | APT29 | Lazarus | Unknown +----------------------------------------------------------------- +Infrastructure overlap | ++ | - | - | N +TTP 
consistency | ++ | ++ | - | N +Malware similarity | + | - | - | N +Timing (UTC+3) | ++ | ++ | - | N +Language (Russian) | ++ | ++ | - | N +``` + +### Scoring +| Symbol | Meaning | Weight | +|--------|---------|--------| +| `++` | Strongly consistent | +2 | +| `+` | Consistent | +1 | +| `N` | Neutral | 0 | +| `-` | Inconsistent | -1 | +| `--` | Strongly inconsistent | -2 | + +## MITRE ATT&CK Group Queries + +### Python (mitreattack-python) +```python +from mitreattack.stix20 import MitreAttackData +attack = MitreAttackData("enterprise-attack.json") +group = attack.get_group_by_alias("APT29") +techniques = attack.get_techniques_used_by_group(group.id) +``` + +### STIX2 Relationship Query +```python +from stix2 import Filter +relationships = src.query([ + Filter("type", "=", "relationship"), + Filter("source_ref", "=", group_id), + Filter("relationship_type", "=", "uses"), +]) +``` + +## Infrastructure Overlap Tools + +### PassiveTotal / RiskIQ +```bash +# WHOIS history +curl -u user:key "https://api.passivetotal.org/v2/whois?query=domain.com" + +# Passive DNS +curl -u user:key "https://api.passivetotal.org/v2/dns/passive?query=1.2.3.4" +``` + +### VirusTotal Relations +```bash +curl -H "x-apikey: KEY" \ + "https://www.virustotal.com/api/v3/domains/example.com/communicating_files" +``` + +## Confidence Assessment Framework + +| Level | Score Range | Criteria | +|-------|------------|---------| +| HIGH | 0.8-1.0 | Multiple independent evidence types converge | +| MEDIUM | 0.5-0.8 | Significant evidence with some gaps | +| LOW | 0.2-0.5 | Limited evidence, alternative hypotheses remain | +| NEGLIGIBLE | 0.0-0.2 | Insufficient evidence for attribution | + +## STIX Attribution Objects + +### Campaign Object +```json +{ + "type": "campaign", + "name": "Operation DarkShadow", + "first_seen": "2024-01-15T00:00:00Z", + "last_seen": "2024-03-20T00:00:00Z", + "objective": "Espionage targeting defense sector" +} +``` + +### Attribution Relationship +```json +{ + "type": 
"relationship", + "relationship_type": "attributed-to", + "source_ref": "campaign--abc123", + "target_ref": "intrusion-set--def456", + "confidence": 75 +} +``` diff --git a/personas/_shared/skills/analyzing-campaign-attribution-evidence/references/standards.md b/personas/_shared/skills/analyzing-campaign-attribution-evidence/references/standards.md new file mode 100644 index 0000000..fb9c60d --- /dev/null +++ b/personas/_shared/skills/analyzing-campaign-attribution-evidence/references/standards.md @@ -0,0 +1,24 @@ +# Standards and Frameworks Reference + +## Applicable Standards +- **STIX 2.1**: Structured Threat Information eXpression for CTI data representation +- **TAXII 2.1**: Transport protocol for sharing CTI over HTTPS +- **MITRE ATT&CK**: Adversary tactics, techniques, and procedures taxonomy +- **Diamond Model**: Intrusion analysis framework (Adversary, Capability, Infrastructure, Victim) +- **Traffic Light Protocol (TLP)**: Information sharing classification (CLEAR, GREEN, AMBER, RED) + +## MITRE ATT&CK Relevance +- Technique mapping for threat actor behavior classification +- Data sources for detection capability assessment +- Mitigation strategies linked to specific techniques + +## Industry Frameworks +- NIST Cybersecurity Framework (CSF) 2.0 - Identify function +- ISO 27001:2022 - A.5.7 Threat Intelligence +- FIRST Standards - TLP, CSIRT, vulnerability coordination + +## References +- [STIX 2.1 Specification](https://docs.oasis-open.org/cti/stix/v2.1/stix-v2.1.html) +- [MITRE ATT&CK](https://attack.mitre.org/) +- [Diamond Model Paper](https://www.activeresponse.org/wp-content/uploads/2013/07/diamond.pdf) +- [NIST CSF 2.0](https://www.nist.gov/cyberframework) diff --git a/personas/_shared/skills/analyzing-campaign-attribution-evidence/references/workflows.md b/personas/_shared/skills/analyzing-campaign-attribution-evidence/references/workflows.md new file mode 100644 index 0000000..8630223 --- /dev/null +++ 
b/personas/_shared/skills/analyzing-campaign-attribution-evidence/references/workflows.md @@ -0,0 +1,31 @@ +# Campaign Attribution Analysis Workflows + +## Workflow 1: Collection and Analysis +``` +[Intelligence Sources] --> [Data Collection] --> [Analysis] --> [Reporting] + | | | | + v v v v + OSINT/HUMINT/SIGINT Normalize/Enrich Assess/Correlate Disseminate +``` + +### Steps: +1. **Planning**: Define intelligence requirements and collection priorities +2. **Collection**: Gather data from relevant sources +3. **Processing**: Normalize data formats and filter noise +4. **Analysis**: Apply analytical frameworks and correlate findings +5. **Production**: Generate intelligence products and reports +6. **Dissemination**: Share with stakeholders via appropriate channels +7. **Feedback**: Collect consumer feedback to refine future collection + +## Workflow 2: Continuous Monitoring +``` +[Watchlist] --> [Automated Monitoring] --> [Change Detection] --> [Alert/Update] +``` + +### Steps: +1. **Define Watchlist**: Identify indicators, actors, and topics to monitor +2. **Configure Monitoring**: Set up automated collection from relevant sources +3. **Change Detection**: Identify new or changed intelligence +4. **Assessment**: Evaluate significance of changes +5. **Alerting**: Notify stakeholders of significant intelligence updates +6. **Archive**: Store intelligence for historical analysis and trending diff --git a/personas/_shared/skills/analyzing-campaign-attribution-evidence/scripts/agent.py b/personas/_shared/skills/analyzing-campaign-attribution-evidence/scripts/agent.py new file mode 100644 index 0000000..ac5608f --- /dev/null +++ b/personas/_shared/skills/analyzing-campaign-attribution-evidence/scripts/agent.py @@ -0,0 +1,253 @@ +#!/usr/bin/env python3 +"""Campaign attribution analysis agent using Diamond Model and ACH methodology. 
"""
Evaluates attribution evidence including infrastructure overlaps, TTP consistency,
malware code similarity, timing patterns, and language artifacts.
"""

import json
import re
from collections import defaultdict
from datetime import datetime


# Diamond Model vertices used to structure intrusion evidence.
DIAMOND_DIMENSIONS = {
    "adversary": "Threat actor identity, group attribution",
    "capability": "Malware, exploits, tools used",
    "infrastructure": "C2 servers, domains, IP addresses",
    "victim": "Targeted sectors, regions, organizations",
}

# Relative weight of each evidence category in the combined attribution score.
# Weights sum to 1.0 so the weighted total stays in [0, 1].
EVIDENCE_WEIGHTS = {
    "infrastructure_overlap": 0.25,
    "ttp_consistency": 0.30,
    "malware_code_similarity": 0.25,
    "timing_pattern": 0.10,
    "language_artifact": 0.10,
}

# Score brackets mapped to analyst-facing confidence labels, ordered high-to-low.
CONFIDENCE_LEVELS = {
    (0.8, 1.0): "HIGH - Strong attribution confidence",
    (0.5, 0.8): "MEDIUM - Moderate attribution, further analysis recommended",
    (0.2, 0.5): "LOW - Weak attribution, insufficient evidence",
    (0.0, 0.2): "NEGLIGIBLE - No meaningful attribution possible",
}


def diamond_model_analysis(adversary=None, capability=None, infrastructure=None, victim=None):
    """Structure evidence using the Diamond Model of Intrusion Analysis.

    Args:
        adversary: Actor identifier, or None when unattributed.
        capability: Optional dict with "tools", "exploits", "malware" lists.
        infrastructure: Optional dict with "c2", "domains", "ips" lists.
        victim: Optional dict with "sectors", "regions" lists.

    Returns:
        Dict with the four diamond vertices plus suggested pivot opportunities.
    """
    model = {
        "adversary": {
            "identified": adversary is not None,
            "details": adversary or "Unknown",
        },
        "capability": {
            "tools": capability.get("tools", []) if capability else [],
            "exploits": capability.get("exploits", []) if capability else [],
            "malware": capability.get("malware", []) if capability else [],
        },
        "infrastructure": {
            "c2_servers": infrastructure.get("c2", []) if infrastructure else [],
            "domains": infrastructure.get("domains", []) if infrastructure else [],
            "ip_addresses": infrastructure.get("ips", []) if infrastructure else [],
        },
        "victim": {
            "sectors": victim.get("sectors", []) if victim else [],
            "regions": victim.get("regions", []) if victim else [],
        },
        "pivot_opportunities": [],
    }
    if infrastructure and infrastructure.get("c2"):
        model["pivot_opportunities"].append("Pivot from C2 infrastructure to related campaigns")
    if capability and capability.get("malware"):
        model["pivot_opportunities"].append("Pivot from malware samples to shared infrastructure")
    return model


def evaluate_infrastructure_overlap(campaign_infra, known_actor_infra):
    """Score infrastructure overlap between campaign and known actor.

    Returns:
        (score, shared): score in [0, 1] is |overlap| / max(|a|, |b|); shared
        is the sorted list of common indicators. (0.0, []) when either side is empty.
    """
    campaign_set = set(campaign_infra)
    known_set = set(known_actor_infra)
    if not campaign_set or not known_set:
        return 0.0, []
    overlap = campaign_set & known_set
    # Normalize by the larger side so a tiny campaign set cannot inflate the score.
    score = len(overlap) / max(len(campaign_set), len(known_set))
    return round(score, 4), sorted(overlap)


def evaluate_ttp_consistency(campaign_ttps, actor_ttps):
    """Score TTP consistency using MITRE ATT&CK technique overlap.

    Returns:
        (jaccard, shared): Jaccard similarity of the two technique-ID sets and
        the sorted list of shared technique IDs.
    """
    campaign_set = set(campaign_ttps)
    actor_set = set(actor_ttps)
    if not campaign_set or not actor_set:
        return 0.0, []
    overlap = campaign_set & actor_set
    jaccard = len(overlap) / len(campaign_set | actor_set)
    return round(jaccard, 4), sorted(overlap)


def evaluate_malware_similarity(sample_features, known_features):
    """Score malware code similarity based on feature comparison.

    Each sample feature found among the known features counts as a match; the
    ratio is normalized by the larger feature list. Duplicate sample features
    each count, preserving the original scoring behavior.
    """
    if not sample_features or not known_features:
        return 0.0
    known_set = set(known_features)  # O(1) membership instead of repeated list scans
    matches = sum(1 for feature in sample_features if feature in known_set)
    total = max(len(sample_features), len(known_features))
    return round(matches / total, 4) if total > 0 else 0.0


def evaluate_timing_pattern(campaign_timestamps, actor_timezone_offset=None):
    """Analyze operational timing to infer timezone/working hours.

    Args:
        campaign_timestamps: ISO-8601 strings (trailing "Z" accepted) or
            datetime objects.
        actor_timezone_offset: Hours to add when testing a candidate timezone.

    Returns:
        Dict with "score" (fraction of events inside 08:00-18:00 adjusted local
        time) plus summary stats; a zeroed dict with null fields when input is
        empty or entirely unparseable.
    """
    empty_result = {"score": 0.0, "working_hours": None, "timezone_guess": None}
    if not campaign_timestamps:
        return empty_result
    hours = []
    for ts in campaign_timestamps:
        try:
            if isinstance(ts, str):
                # datetime.fromisoformat does not accept "Z"; map it to a UTC offset.
                dt = datetime.fromisoformat(ts.replace("Z", "+00:00"))
            else:
                dt = ts
            adjusted = dt.hour + (actor_timezone_offset or 0)
            hours.append(adjusted % 24)
        except (ValueError, TypeError):
            continue  # skip malformed entries rather than failing the whole batch
    if not hours:
        # BUG FIX: previously returned a bare {"score": 0.0}, inconsistent with
        # the empty-input branch above; callers probing the other keys got KeyError.
        return empty_result
    work_hours = sum(1 for h in hours if 8 <= h <= 18)
    work_ratio = work_hours / len(hours)
    avg_hour = sum(hours) / len(hours)
    return {
        "score": round(work_ratio, 4),
        "average_hour_utc": round(avg_hour, 1),
        "work_hour_ratio": round(work_ratio, 4),
        "sample_size": len(hours),
    }


def evaluate_language_artifacts(strings_list):
    """Detect language artifacts in malware strings or documents.

    Returns:
        Dict mapping language name to its share of all pattern hits (0..1);
        empty dict when nothing matches.
    """
    language_indicators = {
        "Russian": [r"[а-яА-Я]{3,}", r"codepage.*1251", r"locale.*ru"],
        "Chinese": [r"[\u4e00-\u9fff]{2,}", r"codepage.*936", r"GB2312"],
        "Korean": [r"[\uac00-\ud7af]{2,}", r"codepage.*949", r"EUC-KR"],
        "Farsi": [r"[\u0600-\u06ff]{3,}", r"codepage.*1256"],
        "English": [r"\b(the|and|for|with)\b"],
    }
    detections = defaultdict(int)
    for s in strings_list:
        for lang, patterns in language_indicators.items():
            for pattern in patterns:
                if re.search(pattern, s, re.IGNORECASE):
                    detections[lang] += 1
    total = sum(detections.values()) or 1  # avoid division by zero when no hits
    return {lang: round(count / total, 4) for lang, count in detections.items()}


def ach_analysis(hypotheses, evidence_items):
    """Analysis of Competing Hypotheses (ACH) for attribution.

    Args:
        hypotheses: List of dicts with at least a "name" key.
        evidence_items: List of dicts with optional "weight" (default 1) and a
            "hypotheses" map of hypothesis name -> consistent / inconsistent /
            neutral.

    Returns:
        Matrix keyed by hypothesis name with weighted tallies and a score in
        roughly [-1, 1] favoring hypotheses with the least inconsistent evidence.
    """
    matrix = {}
    for hyp in hypotheses:
        hyp_name = hyp["name"]
        matrix[hyp_name] = {"consistent": 0, "inconsistent": 0, "neutral": 0, "score": 0}
        for evidence in evidence_items:
            consistency = evidence.get("hypotheses", {}).get(hyp_name, "neutral")
            if consistency == "consistent":
                matrix[hyp_name]["consistent"] += evidence.get("weight", 1)
            elif consistency == "inconsistent":
                matrix[hyp_name]["inconsistent"] += evidence.get("weight", 1)
            else:
                matrix[hyp_name]["neutral"] += evidence.get("weight", 1)
        c = matrix[hyp_name]["consistent"]
        i = matrix[hyp_name]["inconsistent"]
        # +0.01 smooths the denominator so an all-neutral hypothesis scores 0, not NaN.
        matrix[hyp_name]["score"] = round((c - i) / (c + i + 0.01), 4)
    return matrix


def compute_attribution_score(scores):
    """Compute weighted attribution confidence score."""
    total = 0.0
+ for evidence_type, weight in EVIDENCE_WEIGHTS.items(): + score = scores.get(evidence_type, 0.0) + total += score * weight + confidence = "UNKNOWN" + for (low, high), label in CONFIDENCE_LEVELS.items(): + if low <= total < high: + confidence = label + break + return round(total, 4), confidence + + +def generate_attribution_report(campaign_name, candidate_actor, evidence): + """Generate structured attribution assessment report.""" + scores = {} + details = {} + + infra_score, infra_overlap = evaluate_infrastructure_overlap( + evidence.get("campaign_infra", []), evidence.get("actor_infra", [])) + scores["infrastructure_overlap"] = infra_score + details["infrastructure_overlap"] = infra_overlap + + ttp_score, ttp_overlap = evaluate_ttp_consistency( + evidence.get("campaign_ttps", []), evidence.get("actor_ttps", [])) + scores["ttp_consistency"] = ttp_score + details["ttp_consistency"] = ttp_overlap + + malware_score = evaluate_malware_similarity( + evidence.get("sample_features", []), evidence.get("known_features", [])) + scores["malware_code_similarity"] = malware_score + + timing = evaluate_timing_pattern( + evidence.get("timestamps", []), evidence.get("tz_offset")) + scores["timing_pattern"] = timing.get("score", 0.0) + details["timing"] = timing + + lang = evaluate_language_artifacts(evidence.get("strings", [])) + scores["language_artifact"] = max(lang.values()) if lang else 0.0 + details["language_artifacts"] = lang + + total_score, confidence = compute_attribution_score(scores) + + return { + "campaign": campaign_name, + "candidate_actor": candidate_actor, + "attribution_score": total_score, + "confidence_level": confidence, + "evidence_scores": scores, + "evidence_details": details, + } + + +if __name__ == "__main__": + print("=" * 60) + print("Campaign Attribution Evidence Analysis Agent") + print("Diamond Model, ACH, TTP/infrastructure/malware scoring") + print("=" * 60) + + demo_evidence = { + "campaign_infra": ["185.220.101.1", "evil-domain.com", 
"c2.attacker.net"], + "actor_infra": ["185.220.101.1", "c2.attacker.net", "other-domain.org"], + "campaign_ttps": ["T1566.001", "T1059.001", "T1053.005", "T1071.001", "T1041"], + "actor_ttps": ["T1566.001", "T1059.001", "T1053.005", "T1071.001", "T1021.001", "T1003.001"], + "sample_features": ["xor_0x55", "mutex_Global\\QWE", "ua_Mozilla5", "rc4_key"], + "known_features": ["xor_0x55", "mutex_Global\\QWE", "ua_Mozilla5", "aes_cbc"], + "timestamps": ["2024-03-15T06:30:00Z", "2024-03-15T07:15:00Z", + "2024-03-16T08:00:00Z", "2024-03-16T09:45:00Z"], + "tz_offset": 3, + "strings": ["Привет мир", "connect to server", "upload file"], + } + + report = generate_attribution_report("Operation DarkShadow", "APT29", demo_evidence) + + print(f"\n[*] Campaign: {report['campaign']}") + print(f"[*] Candidate: {report['candidate_actor']}") + print(f"[*] Attribution Score: {report['attribution_score']}") + print(f"[*] Confidence: {report['confidence_level']}") + print("\n--- Evidence Scores ---") + for ev, score in report["evidence_scores"].items(): + weight = EVIDENCE_WEIGHTS.get(ev, 0) + print(f" {ev:30s} score={score:.4f} weight={weight}") + print(f"\n[*] Full report:\n{json.dumps(report, indent=2, default=str)}") diff --git a/personas/_shared/skills/analyzing-campaign-attribution-evidence/scripts/process.py b/personas/_shared/skills/analyzing-campaign-attribution-evidence/scripts/process.py new file mode 100644 index 0000000..3e1a9ae --- /dev/null +++ b/personas/_shared/skills/analyzing-campaign-attribution-evidence/scripts/process.py @@ -0,0 +1,169 @@ +#!/usr/bin/env python3 +""" +Campaign Attribution Evidence Analysis Script + +Implements structured attribution analysis: +- Analysis of Competing Hypotheses (ACH) matrix +- Infrastructure overlap scoring +- TTP similarity comparison using ATT&CK +- Evidence weighting and confidence assessment + +Requirements: + pip install attackcti stix2 requests + +Usage: + python process.py --evidence evidence.json --hypotheses actors.json 
"""
    ... --output report.json
    python process.py --compare-ttps --campaign campaign_techs.json --actor APT29
"""

import argparse
import json
import sys
from collections import defaultdict  # NOTE(review): sys/defaultdict appear unused here — confirm before removing


class AttributionEngine:
    """ACH-based attribution engine.

    Holds a flat list of evidence records and a map of candidate-actor
    hypotheses; each piece of evidence is assessed against each hypothesis
    as consistent (C), inconsistent (I), or neutral (N).
    """

    def __init__(self):
        self.evidence = []
        self.hypotheses = {}

    def load_evidence(self, filepath):
        """Replace the evidence list with records loaded from a JSON file."""
        with open(filepath) as fh:
            self.evidence = json.load(fh)

    def add_evidence(self, category, description, value, confidence):
        """Append one evidence record; its id is its position in the list."""
        record = {
            "id": len(self.evidence),
            "category": category,
            "description": description,
            "value": value,
            "confidence": confidence,
        }
        self.evidence.append(record)

    def add_hypothesis(self, actor_name, supporting_info=""):
        """Register a candidate actor with an empty assessment set and zero score."""
        self.hypotheses[actor_name] = {
            "info": supporting_info,
            "assessments": {},
            "score": 0,
        }

    def evaluate(self, evidence_id, actor_name, assessment):
        """Evaluate evidence against hypothesis: C=consistent, I=inconsistent, N=neutral."""
        hypothesis = self.hypotheses[actor_name]
        weight = self.evidence[evidence_id]["confidence"]
        hypothesis["assessments"][evidence_id] = assessment
        if assessment == "C":
            hypothesis["score"] += weight
        elif assessment == "I":
            # Inconsistencies are penalized twice as hard as consistencies help.
            hypothesis["score"] -= weight * 2

    def generate_ach_matrix(self):
        """Build the ACH matrix: evidence rows versus per-hypothesis tallies."""
        evidence_rows = [
            {"id": item["id"], "category": item["category"], "description": item["description"]}
            for item in self.evidence
        ]
        hypothesis_cells = {}
        for actor, data in self.hypotheses.items():
            marks = list(data["assessments"].values())
            hypothesis_cells[actor] = {
                "assessments": data["assessments"],
                "score": data["score"],
                "consistent": marks.count("C"),
                "inconsistent": marks.count("I"),
                "neutral": marks.count("N"),
            }
        return {"evidence": evidence_rows, "hypotheses": hypothesis_cells}

    def rank(self):
        """Rank hypotheses by score, attaching a coarse confidence label."""
        ordered = sorted(
            self.hypotheses.items(), key=lambda kv: kv[1]["score"], reverse=True
        )
        results = []
        for name, data in ordered:
            inconsistent = list(data["assessments"].values()).count("I")
            if data["score"] >= 80 and inconsistent == 0:
                confidence = "HIGH"
            elif data["score"] >= 40:
                confidence = "MODERATE"
            else:
                confidence = "LOW"
            results.append({
                "actor": name,
                "score": data["score"],
                "confidence": confidence,
                "inconsistent_count": inconsistent,
            })
        return results


def compare_ttp_similarity(campaign_techs, actor_techs):
    """Jaccard similarity and coverage between campaign and actor technique sets."""
    campaign = set(campaign_techs)
    actor = set(actor_techs)
    shared = campaign & actor
    union = campaign | actor
    jaccard = len(shared) / len(union) if union else 0
    coverage = round(len(shared) / len(campaign) * 100, 1) if campaign else 0
    return {
        "common": sorted(shared),
        "jaccard_similarity": round(jaccard, 3),
        "campaign_coverage": coverage,
    }


def main():
    """CLI entry point: run an ACH evaluation or a TTP comparison."""
    parser = argparse.ArgumentParser(description="Campaign Attribution Analysis")
    parser.add_argument("--evidence", help="Evidence JSON file")
    parser.add_argument("--hypotheses", help="Hypotheses JSON file")
    parser.add_argument("--compare-ttps", action="store_true")
    parser.add_argument("--campaign", help="Campaign techniques JSON")
    parser.add_argument("--actor", help="Actor name for ATT&CK lookup")
    parser.add_argument("--output", default="attribution_report.json")

    args = parser.parse_args()
    engine = AttributionEngine()

    if args.evidence and args.hypotheses:
        engine.load_evidence(args.evidence)
        with open(args.hypotheses) as fh:
            hyps = json.load(fh)
        for h in hyps:
            engine.add_hypothesis(h["name"], h.get("info", ""))
            for eid, assessment in h.get("evaluations", {}).items():
                engine.evaluate(int(eid), h["name"], assessment)

        matrix = engine.generate_ach_matrix()
        rankings = engine.rank()
        report = {"ach_matrix": matrix, "rankings": rankings}
        print(json.dumps(report, indent=2))

        with open(args.output, "w") as fh:
            json.dump(report, fh, indent=2)

    elif args.compare_ttps and args.campaign:
        with open(args.campaign) as f:
            campaign_techs
= json.load(f) + + if args.actor: + try: + from attackcti import attack_client + lift = attack_client() + groups = lift.get_groups() + group = next( + (g for g in groups if args.actor.lower() in g.get("name", "").lower()), + None, + ) + if group: + gid = group["external_references"][0]["external_id"] + techs = lift.get_techniques_used_by_group(gid) + actor_techs = [ + t["external_references"][0]["external_id"] + for t in techs if t.get("external_references") + ] + result = compare_ttp_similarity(campaign_techs, actor_techs) + print(json.dumps(result, indent=2)) + except ImportError: + print("[-] attackcti not installed") + + +if __name__ == "__main__": + main() diff --git a/personas/_shared/skills/analyzing-certificate-transparency-for-phishing/LICENSE b/personas/_shared/skills/analyzing-certificate-transparency-for-phishing/LICENSE new file mode 100644 index 0000000..d885118 --- /dev/null +++ b/personas/_shared/skills/analyzing-certificate-transparency-for-phishing/LICENSE @@ -0,0 +1,201 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to the Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by the Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding any notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. Please do not remove or change + the license header comment from a contributed file except when + necessary. 
+ + Copyright 2026 mukul975 + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/personas/_shared/skills/analyzing-certificate-transparency-for-phishing/SKILL.md b/personas/_shared/skills/analyzing-certificate-transparency-for-phishing/SKILL.md new file mode 100644 index 0000000..0ae3157 --- /dev/null +++ b/personas/_shared/skills/analyzing-certificate-transparency-for-phishing/SKILL.md @@ -0,0 +1,336 @@ +--- +name: analyzing-certificate-transparency-for-phishing +description: Monitor Certificate Transparency logs using crt.sh and Certstream to detect phishing domains, lookalike certificates, + and unauthorized certificate issuance targeting your organization. +domain: cybersecurity +subdomain: threat-intelligence +tags: +- certificate-transparency +- ct-logs +- phishing +- crt-sh +- certstream +- ssl +- domain-monitoring +- threat-intelligence +version: '1.0' +author: mahipal +license: Apache-2.0 +atlas_techniques: +- AML.T0052 +nist_csf: +- ID.RA-01 +- ID.RA-05 +- DE.CM-01 +- DE.AE-02 +--- +# Analyzing Certificate Transparency for Phishing + +## Overview + +Certificate Transparency (CT) is an Internet security standard that creates a public, append-only log of all issued SSL/TLS certificates. Monitoring CT logs enables early detection of phishing domains that register certificates mimicking legitimate brands, unauthorized certificate issuance for owned domains, and certificate-based attack infrastructure. 
This skill covers querying CT logs via crt.sh, real-time monitoring with Certstream, building automated alerting for suspicious certificates, and integrating findings into threat intelligence workflows. + + +## When to Use + +- When investigating security incidents that require analyzing certificate transparency for phishing +- When building detection rules or threat hunting queries for this domain +- When SOC analysts need structured procedures for this analysis type +- When validating security monitoring coverage for related attack techniques + +## Prerequisites + +- Python 3.9+ with `requests`, `certstream`, `tldextract`, `Levenshtein` libraries +- Access to crt.sh (https://crt.sh/) for historical CT log queries +- Certstream (https://certstream.calidog.io/) for real-time monitoring +- List of organization domains and brand keywords to monitor +- Understanding of SSL/TLS certificate structure and issuance process + +## Key Concepts + +### Certificate Transparency Logs + +CT logs are cryptographically assured, publicly auditable, append-only records of TLS certificate issuance. Major CAs (Let's Encrypt, DigiCert, Sectigo, Google Trust Services) submit all issued certificates to multiple CT logs. As of 2025, Chrome and Safari require CT for all publicly trusted certificates. + +### Phishing Detection via CT + +Attackers register lookalike domains and obtain free certificates (often from Let's Encrypt) to make phishing sites appear legitimate with HTTPS. CT monitoring detects these early because the certificate appears in logs before the phishing campaign launches, providing a window for proactive blocking. + +### crt.sh Database + +crt.sh is a free web interface and PostgreSQL database operated by Sectigo that indexes CT logs. It supports wildcard searches (`%.example.com`), direct SQL queries, and JSON API responses. It tracks certificate issuance, expiration, and revocation across all major CT logs. 
+ +## Workflow + +### Step 1: Query crt.sh for Certificate History + +```python +import requests +import json +from datetime import datetime +import tldextract + +class CTLogMonitor: + CRT_SH_URL = "https://crt.sh" + + def __init__(self, monitored_domains, brand_keywords): + self.monitored_domains = monitored_domains + self.brand_keywords = [k.lower() for k in brand_keywords] + + def query_crt_sh(self, domain, include_expired=False): + """Query crt.sh for certificates matching a domain.""" + params = { + "q": f"%.{domain}", + "output": "json", + } + if not include_expired: + params["exclude"] = "expired" + + resp = requests.get(self.CRT_SH_URL, params=params, timeout=30) + if resp.status_code == 200: + certs = resp.json() + print(f"[+] crt.sh: {len(certs)} certificates for *.{domain}") + return certs + return [] + + def find_suspicious_certs(self, domain): + """Find certificates that may be phishing attempts.""" + certs = self.query_crt_sh(domain) + suspicious = [] + + for cert in certs: + common_name = cert.get("common_name", "").lower() + name_value = cert.get("name_value", "").lower() + issuer = cert.get("issuer_name", "") + not_before = cert.get("not_before", "") + not_after = cert.get("not_after", "") + + # Check for exact domain matches (legitimate) + extracted = tldextract.extract(common_name) + cert_domain = f"{extracted.domain}.{extracted.suffix}" + if cert_domain == domain: + continue # Legitimate certificate + + # Flag suspicious patterns + flags = [] + if domain.replace(".", "") in common_name.replace(".", ""): + flags.append("contains target domain string") + if any(kw in common_name for kw in self.brand_keywords): + flags.append("contains brand keyword") + if "let's encrypt" in issuer.lower(): + flags.append("free CA (Let's Encrypt)") + + if flags: + suspicious.append({ + "common_name": cert.get("common_name", ""), + "name_value": cert.get("name_value", ""), + "issuer": issuer, + "not_before": not_before, + "not_after": not_after, + "serial": 
cert.get("serial_number", ""),
+                    "flags": flags,
+                    "crt_sh_id": cert.get("id", ""),
+                    "crt_sh_url": f"https://crt.sh/?id={cert.get('id', '')}",
+                })
+
+        print(f"[+] Found {len(suspicious)} suspicious certificates")
+        return suspicious
+
+monitor = CTLogMonitor(
+    monitored_domains=["mycompany.com", "mycompany.org"],
+    brand_keywords=["mycompany", "mybrand", "myproduct"],
+)
+suspicious = monitor.find_suspicious_certs("mycompany.com")
+for cert in suspicious[:5]:
+    print(f"  [{cert['common_name']}] Flags: {cert['flags']}")
+```
+
+### Step 2: Real-Time Monitoring with Certstream
+
+```python
+import certstream
+import Levenshtein
+import tldextract
+from datetime import datetime
+
+class CertstreamMonitor:
+    def __init__(self, watched_domains, brand_keywords, similarity_threshold=0.8):
+        self.watched_domains = [d.lower() for d in watched_domains]
+        self.brand_keywords = [k.lower() for k in brand_keywords]
+        self.threshold = similarity_threshold
+        self.alerts = []
+
+    def start_monitoring(self, max_alerts=100):
+        """Start real-time CT log monitoring."""
+        print("[*] Starting Certstream monitoring...")
+        print(f"    Watching: {self.watched_domains}")
+        print(f"    Keywords: {self.brand_keywords}")
+
+        def callback(message, context):
+            if message["message_type"] == "certificate_update":
+                data = message["data"]
+                leaf = data.get("leaf_cert", {})
+                all_domains = leaf.get("all_domains", [])
+
+                for domain in all_domains:
+                    domain_lower = domain.lower().strip("*.")
+                    if self._is_suspicious(domain_lower):
+                        alert = {
+                            "domain": domain,
+                            "all_domains": all_domains,
+                            "issuer": leaf.get("issuer", {}).get("O", ""),
+                            "fingerprint": leaf.get("fingerprint", ""),
+                            "not_before": leaf.get("not_before", ""),
+                            "detected_at": datetime.now().isoformat(),
+                            "reason": self._get_reason(domain_lower),
+                        }
+                        self.alerts.append(alert)
+                        print(f"  [ALERT] {domain} - {alert['reason']}")
+
+                        if len(self.alerts) >= max_alerts:
+                            raise KeyboardInterrupt
+
+        try:
+            certstream.listen_for_events(callback, 
url="wss://certstream.calidog.io/") + except KeyboardInterrupt: + print(f"\n[+] Monitoring stopped. {len(self.alerts)} alerts collected.") + return self.alerts + + def _is_suspicious(self, domain): + """Check if domain is suspicious relative to watched domains.""" + for watched in self.watched_domains: + # Exact keyword match + watched_base = watched.split(".")[0] + if watched_base in domain and domain != watched: + return True + + # Levenshtein distance (typosquatting detection) + domain_base = tldextract.extract(domain).domain + similarity = Levenshtein.ratio(watched_base, domain_base) + if similarity >= self.threshold and domain_base != watched_base: + return True + + # Brand keyword match + for keyword in self.brand_keywords: + if keyword in domain: + return True + + return False + + def _get_reason(self, domain): + """Determine why domain was flagged.""" + reasons = [] + for watched in self.watched_domains: + watched_base = watched.split(".")[0] + if watched_base in domain: + reasons.append(f"contains '{watched_base}'") + domain_base = tldextract.extract(domain).domain + similarity = Levenshtein.ratio(watched_base, domain_base) + if similarity >= self.threshold and domain_base != watched_base: + reasons.append(f"similar to '{watched}' ({similarity:.0%})") + for kw in self.brand_keywords: + if kw in domain: + reasons.append(f"brand keyword '{kw}'") + return "; ".join(reasons) if reasons else "unknown" + +cs_monitor = CertstreamMonitor( + watched_domains=["mycompany.com"], + brand_keywords=["mycompany", "mybrand"], + similarity_threshold=0.75, +) +alerts = cs_monitor.start_monitoring(max_alerts=50) +``` + +### Step 3: Enumerate Subdomains from CT Logs + +```python +def enumerate_subdomains_ct(domain): + """Discover all subdomains from Certificate Transparency logs.""" + params = {"q": f"%.{domain}", "output": "json"} + resp = requests.get("https://crt.sh", params=params, timeout=30) + + if resp.status_code != 200: + return [] + + certs = resp.json() + subdomains 
= set() + for cert in certs: + name_value = cert.get("name_value", "") + for name in name_value.split("\n"): + name = name.strip().lower() + if name.endswith(f".{domain}") or name == domain: + name = name.lstrip("*.") + subdomains.add(name) + + sorted_subs = sorted(subdomains) + print(f"[+] CT subdomain enumeration for {domain}: {len(sorted_subs)} subdomains") + return sorted_subs + +subdomains = enumerate_subdomains_ct("example.com") +for sub in subdomains[:20]: + print(f" {sub}") +``` + +### Step 4: Generate CT Intelligence Report + +```python +def generate_ct_report(suspicious_certs, certstream_alerts, domain): + report = f"""# Certificate Transparency Intelligence Report +## Target Domain: {domain} +## Generated: {datetime.now().isoformat()} + +## Summary +- Suspicious certificates found: {len(suspicious_certs)} +- Real-time alerts triggered: {len(certstream_alerts)} + +## Suspicious Certificates (crt.sh) +| Common Name | Issuer | Flags | crt.sh Link | +|------------|--------|-------|-------------| +""" + for cert in suspicious_certs[:20]: + flags = "; ".join(cert.get("flags", [])) + report += (f"| {cert['common_name']} | {cert['issuer'][:30]} " + f"| {flags} | [View]({cert['crt_sh_url']}) |\n") + + report += f""" +## Real-Time Certstream Alerts +| Domain | Issuer | Reason | Detected | +|--------|--------|--------|----------| +""" + for alert in certstream_alerts[:20]: + report += (f"| {alert['domain']} | {alert['issuer']} " + f"| {alert['reason']} | {alert['detected_at'][:19]} |\n") + + report += """ +## Recommendations +1. Add flagged domains to DNS sinkhole / web proxy blocklist +2. Submit takedown requests for confirmed phishing domains +3. Monitor CT logs continuously for new certificate registrations +4. Implement CAA DNS records to restrict certificate issuance for your domains +5. 
Deploy DMARC to prevent email spoofing from lookalike domains +""" + with open(f"ct_report_{domain.replace('.','_')}.md", "w") as f: + f.write(report) + print(f"[+] CT report saved") + return report + +generate_ct_report(suspicious, alerts if 'alerts' in dir() else [], "mycompany.com") +``` + +## Validation Criteria + +- crt.sh queries return certificate data for target domains +- Suspicious certificates identified based on lookalike patterns +- Certstream real-time monitoring detects new phishing certificates +- Subdomain enumeration produces comprehensive list from CT logs +- Alerts generated with reason classification +- CT intelligence report created with actionable recommendations + +## References + +- [crt.sh Certificate Search](https://crt.sh/) +- [Certstream Real-Time CT Monitor](https://certstream.calidog.io/) +- [River Security: CT Logs for Attack Surface Discovery](https://riversecurity.eu/finding-attack-surface-and-fraudulent-domains-via-certificate-transparency-logs/) +- [Let's Encrypt: Certificate Transparency Logs](https://letsencrypt.org/docs/ct-logs/) +- [SSLMate Cert Spotter](https://sslmate.com/certspotter/) +- [CyberSierra: CT Logs as Early Warning System](https://cybersierra.co/blog/ssl-certificate-transparency-logs/) diff --git a/personas/_shared/skills/analyzing-certificate-transparency-for-phishing/references/api-reference.md b/personas/_shared/skills/analyzing-certificate-transparency-for-phishing/references/api-reference.md new file mode 100644 index 0000000..e0a2544 --- /dev/null +++ b/personas/_shared/skills/analyzing-certificate-transparency-for-phishing/references/api-reference.md @@ -0,0 +1,97 @@ +# API Reference: Certificate Transparency Phishing Detection + +## crt.sh API + +### Search Certificates +```bash +# JSON output +curl "https://crt.sh/?q=%.example.com&output=json" + +# Exclude expired +curl "https://crt.sh/?q=%.example.com&output=json&exclude=expired" + +# Exact match +curl "https://crt.sh/?q=example.com&output=json" +``` + 
+### Response Fields +| Field | Description | +|-------|-------------| +| `id` | Certificate ID in crt.sh database | +| `common_name` | Certificate CN | +| `name_value` | All SANs (newline-separated) | +| `issuer_name` | Certificate Authority | +| `not_before` | Validity start | +| `not_after` | Validity end | +| `serial_number` | Certificate serial | + +## Certstream - Real-time CT Monitoring + +### Python Client +```python +import certstream + +def callback(message, context): + if message["message_type"] == "certificate_update": + data = message["data"] + domains = data["leaf_cert"]["all_domains"] + for domain in domains: + if "example" in domain: + print(f"[ALERT] {domain}") + +certstream.listen_for_events(callback, url="wss://certstream.calidog.io/") +``` + +### Message Fields +| Field | Path | +|-------|------| +| Domains | `data.leaf_cert.all_domains` | +| Issuer | `data.leaf_cert.issuer.O` | +| Subject | `data.leaf_cert.subject.CN` | +| Fingerprint | `data.leaf_cert.fingerprint` | +| Source | `data.source.name` | + +## CT Log Servers + +| Log | Operator | URL | +|-----|----------|-----| +| Argon | Google | `ct.googleapis.com/logs/argon2024` | +| Xenon | Google | `ct.googleapis.com/logs/xenon2024` | +| Nimbus | Cloudflare | `ct.cloudflare.com/logs/nimbus2024` | +| Oak | Let's Encrypt | `oak.ct.letsencrypt.org/2024h1` | +| Yeti | DigiCert | `yeti2024.ct.digicert.com/log` | + +## Phishing Detection Techniques + +### Homoglyph / IDN Attacks +| Original | Lookalike | Technique | +|----------|-----------|-----------| +| example.com | examp1e.com | Character substitution (l→1) | +| google.com | gооgle.com | Cyrillic о (U+043E) | +| paypal.com | paypa1.com | l→1 substitution | +| microsoft.com | mіcrosoft.com | Cyrillic і (U+0456) | + +### dnstwist Integration +```bash +dnstwist -r -f json example.com # Generate and resolve permutations +dnstwist -w wordlist.txt example.com # Dictionary-based +``` + +## Certificate Details Lookup +```bash +# Get full certificate 
from crt.sh
+curl "https://crt.sh/?d=<cert_id>"
+
+# OpenSSL inspection
+openssl s_client -connect domain.com:443 -servername domain.com </dev/null 2>/dev/null | \
+  openssl x509 -noout -text
+```
+
+## Suspicious Indicators
+| Pattern | Risk Level |
+|---------|-----------|
+| Free CA + new domain + brand keyword | HIGH |
+| Wildcard cert on recently registered domain | HIGH |
+| Multiple certs for slight domain variants | MEDIUM |
+| IDN/punycode domain mimicking brand | HIGH |
+| Cert issued same day as domain registration | MEDIUM |
diff --git a/personas/_shared/skills/analyzing-certificate-transparency-for-phishing/scripts/agent.py b/personas/_shared/skills/analyzing-certificate-transparency-for-phishing/scripts/agent.py
new file mode 100644
index 0000000..7ff7597
--- /dev/null
+++ b/personas/_shared/skills/analyzing-certificate-transparency-for-phishing/scripts/agent.py
@@ -0,0 +1,210 @@
+#!/usr/bin/env python3
+"""Certificate Transparency monitoring agent for phishing detection.
+
+Queries crt.sh for certificates matching target domains, detects lookalike
+certificates, and identifies potential phishing infrastructure. 
+""" + +import json +import sys +from collections import defaultdict + +try: + import requests + HAS_REQUESTS = True +except ImportError: + HAS_REQUESTS = False + + +def query_crtsh(domain, wildcard=True, expired=False): + """Query crt.sh for certificates matching a domain.""" + if not HAS_REQUESTS: + return [] + query = f"%.{domain}" if wildcard else domain + params = {"q": query, "output": "json"} + if not expired: + params["exclude"] = "expired" + try: + resp = requests.get("https://crt.sh/", params=params, timeout=30) + resp.raise_for_status() + return resp.json() + except (requests.RequestException, json.JSONDecodeError) as e: + return [{"error": str(e)}] + + +def find_lookalike_domains(target_domain, ct_results): + """Identify certificates for domains that look similar to the target.""" + base = target_domain.split(".")[0].lower() + lookalikes = [] + for cert in ct_results: + cn = cert.get("common_name", "").lower() + names = cert.get("name_value", "").lower().split("\n") + for name in [cn] + names: + name = name.strip() + if not name or name == target_domain: + continue + similarity = calculate_similarity(base, name.split(".")[0]) + if similarity > 0.6 and name != target_domain: + lookalikes.append({ + "domain": name, + "similarity": round(similarity, 3), + "issuer": cert.get("issuer_name", ""), + "not_before": cert.get("not_before", ""), + "not_after": cert.get("not_after", ""), + "cert_id": cert.get("id"), + }) + seen = set() + unique = [] + for l in sorted(lookalikes, key=lambda x: -x["similarity"]): + if l["domain"] not in seen: + seen.add(l["domain"]) + unique.append(l) + return unique + + +def calculate_similarity(s1, s2): + """Calculate string similarity using Levenshtein-like ratio.""" + if s1 == s2: + return 1.0 + len1, len2 = len(s1), len(s2) + if len1 == 0 or len2 == 0: + return 0.0 + matrix = [[0] * (len2 + 1) for _ in range(len1 + 1)] + for i in range(len1 + 1): + matrix[i][0] = i + for j in range(len2 + 1): + matrix[0][j] = j + for i in 
range(1, len1 + 1): + for j in range(1, len2 + 1): + cost = 0 if s1[i-1] == s2[j-1] else 1 + matrix[i][j] = min(matrix[i-1][j] + 1, matrix[i][j-1] + 1, + matrix[i-1][j-1] + cost) + distance = matrix[len1][len2] + return 1.0 - distance / max(len1, len2) + + +HOMOGLYPH_MAP = { + "a": ["а", "@", "4"], "e": ["е", "3"], "o": ["о", "0"], + "i": ["і", "1", "l"], "l": ["1", "i", "I"], + "s": ["5", "$"], "t": ["7"], "g": ["9", "q"], +} + + +def detect_homoglyph_domains(target_domain, ct_results): + """Detect domains using homoglyph/IDN attacks against target.""" + findings = [] + base = target_domain.split(".")[0].lower() + for cert in ct_results: + names = cert.get("name_value", "").lower().split("\n") + for name in names: + name = name.strip() + if not name or name == target_domain: + continue + name_base = name.split(".")[0] + if len(name_base) == len(base): + diffs = sum(1 for a, b in zip(base, name_base) if a != b) + if 0 < diffs <= 2: + findings.append({ + "domain": name, + "char_differences": diffs, + "cert_id": cert.get("id"), + "issuer": cert.get("issuer_name", ""), + }) + return findings + + +def analyze_issuer_patterns(ct_results): + """Analyze certificate issuer patterns for anomalies.""" + issuer_counts = defaultdict(int) + free_cas = ["Let's Encrypt", "ZeroSSL", "Buypass"] + for cert in ct_results: + issuer = cert.get("issuer_name", "Unknown") + issuer_counts[issuer] += 1 + free_ca_certs = sum( + count for issuer, count in issuer_counts.items() + if any(ca.lower() in issuer.lower() for ca in free_cas) + ) + return { + "issuers": dict(issuer_counts), + "total_certs": len(ct_results), + "free_ca_count": free_ca_certs, + "free_ca_ratio": round(free_ca_certs / max(len(ct_results), 1), 3), + } + + +def detect_wildcard_abuse(ct_results): + """Detect suspicious wildcard certificate patterns.""" + wildcards = [] + for cert in ct_results: + cn = cert.get("common_name", "") + if cn.startswith("*."): + wildcards.append({ + "domain": cn, + "issuer": 
cert.get("issuer_name", ""),
+                "not_before": cert.get("not_before", ""),
+            })
+    return wildcards
+
+
+def generate_report(target_domain, ct_results):
+    """Generate comprehensive CT monitoring report."""
+    lookalikes = find_lookalike_domains(target_domain, ct_results)
+    homoglyphs = detect_homoglyph_domains(target_domain, ct_results)
+    issuer_analysis = analyze_issuer_patterns(ct_results)
+    wildcards = detect_wildcard_abuse(ct_results)
+
+    risk_score = 0
+    risk_score += min(len(lookalikes) * 10, 40)
+    risk_score += min(len(homoglyphs) * 15, 30)
+    risk_score += 20 if issuer_analysis["free_ca_ratio"] > 0.8 else 0
+    risk_score = min(risk_score, 100)
+
+    return {
+        "target_domain": target_domain,
+        "total_certificates": len(ct_results),
+        "lookalike_domains": lookalikes[:20],
+        "homoglyph_domains": homoglyphs[:20],
+        "issuer_analysis": issuer_analysis,
+        "wildcard_certs": wildcards[:10],
+        "risk_score": risk_score,
+        "risk_level": "HIGH" if risk_score >= 60 else "MEDIUM" if risk_score >= 30 else "LOW",
+    }
+
+
+if __name__ == "__main__":
+    print("=" * 60)
+    print("Certificate Transparency Phishing Detection Agent")
+    print("crt.sh queries, lookalike detection, homoglyph analysis")
+    print("=" * 60)
+
+    domain = sys.argv[1] if len(sys.argv) > 1 else None
+
+    if not domain:
+        print("\n[DEMO] Usage: python agent.py <domain>")
+        print("              e.g. python agent.py example.com")
+        sys.exit(0)
+
+    if not HAS_REQUESTS:
+        print("[!] 
Install requests: pip install requests") + sys.exit(1) + + print(f"\n[*] Querying crt.sh for: {domain}") + results = query_crtsh(domain) + print(f"[*] Found {len(results)} certificates") + + report = generate_report(domain, results) + + print(f"\n--- Lookalike Domains ({len(report['lookalike_domains'])}) ---") + for l in report["lookalike_domains"][:10]: + print(f" [{l['similarity']:.3f}] {l['domain']} (issuer: {l['issuer'][:40]})") + + print(f"\n--- Homoglyph Domains ({len(report['homoglyph_domains'])}) ---") + for h in report["homoglyph_domains"][:10]: + print(f" [diff={h['char_differences']}] {h['domain']}") + + print(f"\n--- Issuer Analysis ---") + for issuer, count in sorted(report["issuer_analysis"]["issuers"].items(), + key=lambda x: -x[1])[:5]: + print(f" {count:4d} | {issuer[:60]}") + + print(f"\n[*] Risk Score: {report['risk_score']}/100 ({report['risk_level']})") diff --git a/personas/_shared/skills/analyzing-cloud-storage-access-patterns/LICENSE b/personas/_shared/skills/analyzing-cloud-storage-access-patterns/LICENSE new file mode 100644 index 0000000..d885118 --- /dev/null +++ b/personas/_shared/skills/analyzing-cloud-storage-access-patterns/LICENSE @@ -0,0 +1,201 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to the Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by the Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding any notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. Please do not remove or change + the license header comment from a contributed file except when + necessary. 
+ + Copyright 2026 mukul975 + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/personas/_shared/skills/analyzing-cloud-storage-access-patterns/SKILL.md b/personas/_shared/skills/analyzing-cloud-storage-access-patterns/SKILL.md new file mode 100644 index 0000000..a614987 --- /dev/null +++ b/personas/_shared/skills/analyzing-cloud-storage-access-patterns/SKILL.md @@ -0,0 +1,70 @@ +--- +name: analyzing-cloud-storage-access-patterns +description: Detect abnormal access patterns in AWS S3, GCS, and Azure Blob Storage by analyzing CloudTrail Data Events, GCS + audit logs, and Azure Storage Analytics. Identifies after-hours bulk downloads, access from new IP addresses, unusual API + calls (GetObject spikes), and potential data exfiltration using statistical baselines and time-series anomaly detection. 
+domain: cybersecurity +subdomain: cloud-security +tags: +- analyzing +- cloud +- storage +- access +version: '1.0' +author: mahipal +license: Apache-2.0 +atlas_techniques: +- AML.T0024 +- AML.T0056 +nist_ai_rmf: +- MEASURE-2.7 +- MAP-5.1 +- MANAGE-2.4 +nist_csf: +- PR.IR-01 +- ID.AM-08 +- GV.SC-06 +- DE.CM-01 +--- + + +# Analyzing Cloud Storage Access Patterns + + +## When to Use + +- When investigating security incidents that require analyzing cloud storage access patterns +- When building detection rules or threat hunting queries for this domain +- When SOC analysts need structured procedures for this analysis type +- When validating security monitoring coverage for related attack techniques + +## Prerequisites + +- Familiarity with cloud security concepts and tools +- Access to a test or lab environment for safe execution +- Python 3.8+ with required dependencies installed +- Appropriate authorization for any testing activities + +## Instructions + +1. Install dependencies: `pip install boto3 requests` +2. Query CloudTrail for S3 Data Events using AWS CLI or boto3. +3. Build access baselines: hourly request volume, per-user object counts, source IP history. +4. Detect anomalies: + - After-hours access (outside 8am-6pm local time) + - Bulk downloads: >100 GetObject calls from single principal in 1 hour + - New source IPs not seen in the prior 30 days + - ListBucket enumeration spikes (reconnaissance indicator) +5. Generate prioritized findings report. 
+ +```bash +python scripts/agent.py --bucket my-sensitive-data --hours-back 24 --output s3_access_report.json +``` + +## Examples + +### CloudTrail S3 Data Event +```json +{"eventName": "GetObject", "requestParameters": {"bucketName": "sensitive-data", "key": "financials/q4.xlsx"}, + "sourceIPAddress": "203.0.113.50", "userIdentity": {"arn": "arn:aws:iam::123456789012:user/analyst"}} +``` diff --git a/personas/_shared/skills/analyzing-cloud-storage-access-patterns/references/api-reference.md b/personas/_shared/skills/analyzing-cloud-storage-access-patterns/references/api-reference.md new file mode 100644 index 0000000..b446bf1 --- /dev/null +++ b/personas/_shared/skills/analyzing-cloud-storage-access-patterns/references/api-reference.md @@ -0,0 +1,49 @@ +# API Reference: Cloud Storage Access Pattern Analysis + +## AWS CLI - CloudTrail Lookup +```bash +aws cloudtrail lookup-events \ + --lookup-attributes AttributeKey=ResourceType,AttributeValue=AWS::S3::Object \ + --start-time 2024-01-15T00:00:00Z \ + --output json +``` + +## CloudTrail S3 Data Event Structure +```json +{ + "EventTime": "2024-01-15T10:30:00Z", + "EventName": "GetObject", + "Username": "analyst", + "CloudTrailEvent": "{\"sourceIPAddress\":\"10.0.0.1\",\"userAgent\":\"aws-cli\",\"requestParameters\":{\"bucketName\":\"data\",\"key\":\"file.csv\"},\"userIdentity\":{\"arn\":\"arn:aws:iam::123:user/analyst\"}}" +} +``` + +## Key S3 Event Names +| Event | Meaning | +|-------|---------| +| GetObject | Object download | +| PutObject | Object upload | +| DeleteObject | Object deletion | +| ListBucket / ListObjectsV2 | Bucket enumeration | +| GetBucketPolicy | Policy read | +| PutBucketPolicy | Policy modification | + +## Detection Thresholds +| Anomaly | Threshold | Severity | +|---------|-----------|----------| +| Bulk download | >100 GetObject/hr per user | Critical | +| After-hours | Access outside 08:00-18:00 UTC | Medium | +| New source IP | IP not in 30-day baseline | High | +| Enumeration | >20 
#!/usr/bin/env python3
"""Cloud Storage Access Pattern Analyzer.

Mines CloudTrail S3 data events for abnormal access: bulk downloads,
after-hours activity, unfamiliar source IPs, and bucket-enumeration
spikes, then writes a prioritized JSON report.
"""

import argparse
import json
import logging
import subprocess
from collections import defaultdict
from datetime import datetime, timedelta, timezone

logging.basicConfig(level=logging.INFO, format="%(asctime)s [%(levelname)s] %(message)s")
logger = logging.getLogger(__name__)


def _parse_event_time(ts):
    """Return a datetime for *ts* (ISO-8601 string or datetime), or None on failure."""
    if isinstance(ts, datetime):
        return ts
    try:
        # CloudTrail timestamps use a trailing 'Z'; fromisoformat needs an offset.
        return datetime.fromisoformat(str(ts).replace("Z", "+00:00"))
    except (ValueError, AttributeError):
        return None


def query_cloudtrail_s3_events(bucket_name, hours_back=24):
    """Query CloudTrail for S3 data events, optionally filtered to one bucket.

    Args:
        bucket_name: S3 bucket to filter on; empty string means all buckets.
        hours_back: size of the lookback window in hours.

    Returns:
        A list of flat event dicts. Returns [] on any CLI, timeout, or
        parse failure so the analyzer degrades gracefully during triage
        instead of crashing.
    """
    start_time = (datetime.now(timezone.utc) - timedelta(hours=hours_back)).strftime(
        "%Y-%m-%dT%H:%M:%SZ"
    )
    cmd = [
        "aws", "cloudtrail", "lookup-events",
        "--lookup-attributes",
        # Plain string: the original used an f-string with no placeholders.
        "AttributeKey=ResourceType,AttributeValue=AWS::S3::Object",
        "--start-time", start_time,
        "--output", "json",
    ]
    try:
        result = subprocess.run(cmd, capture_output=True, text=True, timeout=120)
    except (subprocess.TimeoutExpired, FileNotFoundError) as exc:
        # aws CLI missing or hung — report and continue with no events.
        logger.error("CloudTrail query failed to run: %s", exc)
        return []
    if result.returncode != 0:
        logger.error("CloudTrail query failed: %s", result.stderr[:200])
        return []
    try:
        events = json.loads(result.stdout).get("Events", [])
    except json.JSONDecodeError as exc:
        logger.error("CloudTrail returned unparseable JSON: %s", exc)
        return []

    s3_events = []
    for event in events:
        try:
            ct_event = json.loads(event.get("CloudTrailEvent", "{}"))
        except json.JSONDecodeError:
            continue  # skip a single malformed record, keep the rest
        req_params = ct_event.get("requestParameters", {}) or {}
        if req_params.get("bucketName") == bucket_name or not bucket_name:
            s3_events.append({
                "timestamp": event.get("EventTime", ""),
                "event_name": event.get("EventName", ""),
                "username": event.get("Username", ""),
                "source_ip": ct_event.get("sourceIPAddress", ""),
                "user_agent": ct_event.get("userAgent", ""),
                "bucket": req_params.get("bucketName", ""),
                "key": req_params.get("key", ""),
                "user_arn": ct_event.get("userIdentity", {}).get("arn", ""),
            })
    logger.info("Retrieved %d S3 events for bucket '%s'", len(s3_events), bucket_name or "all")
    return s3_events


def detect_bulk_downloads(events, threshold=100):
    """Flag principals with at least *threshold* GetObject calls in the window.

    Returns a list of critical-severity alert dicts, one per offending
    principal.
    """
    per_user = defaultdict(list)
    for event in events:
        if event["event_name"] == "GetObject":
            per_user[event["user_arn"]].append(event)
    alerts = []
    for user_arn, downloads in per_user.items():
        if len(downloads) < threshold:
            continue
        timestamps = [d["timestamp"] for d in downloads]
        alerts.append({
            "user_arn": user_arn,
            "download_count": len(downloads),
            "unique_keys": len({d["key"] for d in downloads}),
            "source_ips": list({d["source_ip"] for d in downloads}),
            # min/max, not list order: CloudTrail lookup results are not
            # guaranteed to be chronological.
            "first_access": min(timestamps),
            "last_access": max(timestamps),
            "severity": "critical",
            "indicator": "Bulk download (potential exfiltration)",
        })
    logger.info("Found %d bulk download alerts", len(alerts))
    return alerts


def detect_after_hours_access(events, business_start=8, business_end=18):
    """Return annotated copies of events occurring outside business hours (UTC).

    Events whose timestamp cannot be parsed are skipped. Input dicts are
    never mutated (the original annotated them in place, which let this
    detector's severity/indicator leak into other detectors' output).
    """
    flagged = []
    for event in events:
        dt = _parse_event_time(event["timestamp"])
        if dt is None:
            continue
        if dt.hour < business_start or dt.hour >= business_end:
            hit = dict(event)  # copy before annotating
            hit["indicator"] = f"After-hours access at {dt.hour:02d}:00 UTC"
            hit["severity"] = "medium"
            flagged.append(hit)
    logger.info("Found %d after-hours access events", len(flagged))
    return flagged


def detect_new_source_ips(events, known_ips=None):
    """Return annotated copies of events from IPs absent from *known_ips*.

    AWS-internal pseudo-addresses ("AWS Internal...") are ignored. Input
    dicts are never mutated.
    """
    if known_ips is None:
        known_ips = set()
    new_ip_events = []
    for event in events:
        ip = event["source_ip"]
        if ip and ip not in known_ips and not ip.startswith("AWS Internal"):
            hit = dict(event)  # copy before annotating
            hit["indicator"] = f"New source IP: {ip}"
            hit["severity"] = "high"
            new_ip_events.append(hit)
    unique_new = len({e["source_ip"] for e in new_ip_events})
    logger.info("Found %d events from %d new source IPs", len(new_ip_events), unique_new)
    return new_ip_events


def detect_enumeration(events, threshold=20):
    """Flag principals issuing at least *threshold* List* calls (recon indicator)."""
    listings = defaultdict(int)
    for event in events:
        if event["event_name"] in ("ListBucket", "ListObjects", "ListObjectsV2"):
            listings[event["user_arn"]] += 1
    return [
        {
            "user_arn": user_arn,
            "list_count": count,
            "severity": "high",
            "indicator": "Bucket enumeration spike (reconnaissance)",
        }
        for user_arn, count in listings.items()
        if count >= threshold
    ]


def build_access_baseline(events):
    """Build a statistical baseline (hourly histogram, per-user counts, IP set)."""
    hourly_counts = defaultdict(int)
    user_counts = defaultdict(int)
    ip_set = set()
    for event in events:
        dt = _parse_event_time(event["timestamp"])
        if dt is not None:
            hourly_counts[dt.hour] += 1
        user_counts[event["user_arn"]] += 1
        if event["source_ip"]:
            ip_set.add(event["source_ip"])
    return {
        "hourly_distribution": dict(hourly_counts),
        "user_request_counts": dict(user_counts),
        "known_ips": list(ip_set),
        "total_events": len(events),
    }


def generate_report(events, bulk_alerts, after_hours, new_ips, enum_alerts, baseline):
    """Assemble the findings report dict and print a one-line summary."""
    report = {
        "timestamp": datetime.now(timezone.utc).isoformat(),
        "total_events_analyzed": len(events),
        "bulk_download_alerts": bulk_alerts,
        "after_hours_access": len(after_hours),
        "new_source_ip_events": len(new_ips),
        "enumeration_alerts": enum_alerts,
        "baseline_summary": {
            "known_ips": len(baseline.get("known_ips", [])),
            "total_baseline_events": baseline.get("total_events", 0),
        },
        "sample_after_hours": after_hours[:10],
        "sample_new_ips": new_ips[:10],
    }
    total_alerts = len(bulk_alerts) + len(enum_alerts) + (1 if new_ips else 0)
    print(f"CLOUD STORAGE REPORT: {len(events)} events, {total_alerts} alerts")
    return report


def main():
    """CLI entry point: query, baseline, run detectors, write JSON report."""
    parser = argparse.ArgumentParser(description="Cloud Storage Access Pattern Analyzer")
    parser.add_argument("--bucket", default="", help="S3 bucket name to analyze")
    parser.add_argument("--hours-back", type=int, default=24)
    parser.add_argument("--bulk-threshold", type=int, default=100)
    parser.add_argument("--known-ips-file", help="File with known IP baselines")
    parser.add_argument("--output", default="s3_access_report.json")
    args = parser.parse_args()

    events = query_cloudtrail_s3_events(args.bucket, args.hours_back)
    baseline = build_access_baseline(events)
    # NOTE(review): baseline IPs come from the SAME window being analyzed,
    # so new-IP detection only fires when an external --known-ips-file
    # supplies a genuinely historical baseline.
    known_ips = set(baseline.get("known_ips", []))
    if args.known_ips_file:
        with open(args.known_ips_file, encoding="utf-8") as f:
            known_ips.update(line.strip() for line in f if line.strip())

    bulk_alerts = detect_bulk_downloads(events, args.bulk_threshold)
    after_hours = detect_after_hours_access(events)
    new_ips = detect_new_source_ips(events, known_ips)
    enum_alerts = detect_enumeration(events)

    report = generate_report(events, bulk_alerts, after_hours, new_ips, enum_alerts, baseline)
    with open(args.output, "w", encoding="utf-8") as f:
        json.dump(report, f, indent=2, default=str)
    logger.info("Report saved to %s", args.output)


if __name__ == "__main__":
    main()
0000000..d885118 --- /dev/null +++ b/personas/_shared/skills/analyzing-cobalt-strike-beacon-configuration/LICENSE @@ -0,0 +1,201 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to the Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by the Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. 
Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding any notices that do not + pertain to any part of the Derivative Works, 
in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. Please do not remove or change + the license header comment from a contributed file except when + necessary. + + Copyright 2026 mukul975 + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/personas/_shared/skills/analyzing-cobalt-strike-beacon-configuration/SKILL.md b/personas/_shared/skills/analyzing-cobalt-strike-beacon-configuration/SKILL.md new file mode 100644 index 0000000..ebc4c11 --- /dev/null +++ b/personas/_shared/skills/analyzing-cobalt-strike-beacon-configuration/SKILL.md @@ -0,0 +1,381 @@ +--- +name: analyzing-cobalt-strike-beacon-configuration +description: Extract and analyze Cobalt Strike beacon configuration from PE files and memory dumps to identify C2 infrastructure, + malleable profiles, and operator tradecraft. 
+domain: cybersecurity +subdomain: malware-analysis +tags: +- cobalt-strike +- beacon +- c2 +- malware-analysis +- config-extraction +- threat-hunting +- red-team-tools +version: '1.0' +author: mahipal +license: Apache-2.0 +nist_csf: +- DE.AE-02 +- RS.AN-03 +- ID.RA-01 +- DE.CM-01 +--- +# Analyzing Cobalt Strike Beacon Configuration + +## Overview + +Cobalt Strike is a commercial adversary simulation tool widely abused by threat actors for post-exploitation operations. Beacon payloads contain embedded configuration data that reveals C2 server addresses, communication protocols, sleep intervals, jitter values, malleable C2 profile settings, watermark identifiers, and encryption keys. Extracting this configuration from PE files, shellcode, or memory dumps is critical for incident responders to map attacker infrastructure and attribute campaigns. The beacon configuration is XOR-encoded using a single byte (0x69 for version 3, 0x2e for version 4) and stored in a Type-Length-Value (TLV) format within the .data section. + + +## When to Use + +- When investigating security incidents that require analyzing cobalt strike beacon configuration +- When building detection rules or threat hunting queries for this domain +- When SOC analysts need structured procedures for this analysis type +- When validating security monitoring coverage for related attack techniques + +## Prerequisites + +- Python 3.9+ with `dissect.cobaltstrike`, `pefile`, `yara-python` +- SentinelOne CobaltStrikeParser (`parse_beacon_config.py`) +- Hex editor (010 Editor, HxD) for manual inspection +- Understanding of PE file format and XOR encoding +- Memory dump acquisition tools (Volatility3, WinDbg) +- Network analysis tools (Wireshark) for C2 traffic correlation + +## Key Concepts + +### Beacon Configuration Structure + +Cobalt Strike beacons store their configuration as a blob of TLV (Type-Length-Value) entries within the .data section of the PE. 
Stageless beacons XOR the entire beacon code with a 4-byte key. The configuration blob itself uses a single-byte XOR key. Each TLV entry contains a 2-byte type identifier (e.g., 0x0001 for BeaconType, 0x0008 for C2Server), a 2-byte length, and variable-length data. + +### Malleable C2 Profiles + +The beacon configuration encodes the malleable C2 profile that dictates HTTP request/response transformations, including URI paths, headers, metadata encoding (Base64, NetBIOS), and data transforms. Analyzing these settings reveals how the beacon disguises its traffic to blend with legitimate web traffic. + +### Watermark and License Identification + +Each Cobalt Strike license embeds a unique watermark (4-byte integer) into generated beacons. Extracting the watermark can link multiple beacons to the same operator or cracked license. Known watermark databases maintained by threat intelligence providers map watermarks to specific threat actors or leaked license keys. + +## Workflow + +### Step 1: Extract Configuration with CobaltStrikeParser + +```python +#!/usr/bin/env python3 +"""Extract Cobalt Strike beacon config from PE or memory dump.""" +import sys +import json + +# Using SentinelOne's CobaltStrikeParser +# pip install dissect.cobaltstrike +from dissect.cobaltstrike.beacon import BeaconConfig + +def extract_beacon_config(filepath): + """Parse beacon configuration from file.""" + configs = list(BeaconConfig.from_path(filepath)) + + if not configs: + print(f"[-] No beacon configuration found in {filepath}") + return None + + for i, config in enumerate(configs): + print(f"\n[+] Beacon Configuration #{i+1}") + print(f"{'='*60}") + + settings = config.as_dict() + + # Critical fields for incident response + critical_fields = [ + "SETTING_C2_REQUEST", + "SETTING_C2_RECOVER", + "SETTING_PUBKEY", + "SETTING_DOMAINS", + "SETTING_BEACONTYPE", + "SETTING_PORT", + "SETTING_SLEEPTIME", + "SETTING_JITTER", + "SETTING_MAXGET", + "SETTING_SPAWNTO_X86", + "SETTING_SPAWNTO_X64", + 
"SETTING_PIPENAME", + "SETTING_WATERMARK", + "SETTING_C2_VERB_GET", + "SETTING_C2_VERB_POST", + "SETTING_USERAGENT", + "SETTING_PROTOCOL", + ] + + for field in critical_fields: + value = settings.get(field, "N/A") + print(f" {field}: {value}") + + return settings + + return None + + +def extract_c2_indicators(config): + """Extract actionable C2 indicators from beacon config.""" + indicators = { + "c2_domains": [], + "c2_ips": [], + "c2_urls": [], + "user_agent": "", + "named_pipes": [], + "spawn_processes": [], + "watermark": "", + } + + if not config: + return indicators + + # Extract C2 domains + domains = config.get("SETTING_DOMAINS", "") + if domains: + for domain in str(domains).split(","): + domain = domain.strip().rstrip("/") + if domain: + indicators["c2_domains"].append(domain) + + # Extract user agent + indicators["user_agent"] = str(config.get("SETTING_USERAGENT", "")) + + # Extract named pipes + pipe = config.get("SETTING_PIPENAME", "") + if pipe: + indicators["named_pipes"].append(str(pipe)) + + # Extract spawn-to processes + for arch in ["SETTING_SPAWNTO_X86", "SETTING_SPAWNTO_X64"]: + proc = config.get(arch, "") + if proc: + indicators["spawn_processes"].append(str(proc)) + + # Extract watermark + indicators["watermark"] = str(config.get("SETTING_WATERMARK", "")) + + return indicators + + +if __name__ == "__main__": + if len(sys.argv) < 2: + print(f"Usage: {sys.argv[0]} ") + sys.exit(1) + + config = extract_beacon_config(sys.argv[1]) + if config: + indicators = extract_c2_indicators(config) + print(f"\n[+] Extracted C2 Indicators:") + print(json.dumps(indicators, indent=2)) +``` + +### Step 2: Manual XOR Decryption of Beacon Config + +```python +import struct + +def find_and_decrypt_config(data): + """Manually locate and decrypt beacon configuration.""" + # Cobalt Strike 4.x uses 0x2e as XOR key + xor_keys = [0x2e, 0x69] # v4, v3 + + for xor_key in xor_keys: + # Search for the config magic bytes after XOR + # Config starts with 0x0001 (BeaconType) 
XOR'd with key + magic = bytes([0x00 ^ xor_key, 0x01 ^ xor_key, + 0x00 ^ xor_key, 0x02 ^ xor_key]) + + offset = data.find(magic) + if offset == -1: + continue + + print(f"[+] Found config at offset 0x{offset:x} (XOR key: 0x{xor_key:02x})") + + # Decrypt the config blob (typically 4096 bytes) + config_size = 4096 + encrypted = data[offset:offset + config_size] + decrypted = bytes([b ^ xor_key for b in encrypted]) + + # Parse TLV entries + entries = parse_tlv(decrypted) + return entries + + return None + + +def parse_tlv(data): + """Parse Type-Length-Value configuration entries.""" + entries = {} + offset = 0 + + # TLV field type mapping + field_names = { + 0x0001: "BeaconType", + 0x0002: "Port", + 0x0003: "SleepTime", + 0x0004: "MaxGetSize", + 0x0005: "Jitter", + 0x0006: "MaxDNS", + 0x0007: "Deprecated_PublicKey", + 0x0008: "C2Server", + 0x0009: "UserAgent", + 0x000a: "PostURI", + 0x000b: "Malleable_C2_Instructions", + 0x000c: "Deprecated_HttpGet_Metadata", + 0x000d: "SpawnTo_x86", + 0x000e: "SpawnTo_x64", + 0x000f: "CryptoScheme", + 0x001a: "Watermark", + 0x001d: "C2_HostHeader", + 0x0024: "PipeName", + 0x0025: "Year", + 0x0026: "Month", + 0x0027: "Day", + 0x0036: "ProxyHostname", + } + + while offset + 6 <= len(data): + entry_type = struct.unpack(">H", data[offset:offset+2])[0] + entry_len_type = struct.unpack(">H", data[offset+2:offset+4])[0] + entry_len = struct.unpack(">H", data[offset+4:offset+6])[0] + + if entry_type == 0: + break + + value_start = offset + 6 + value_end = value_start + entry_len + value_data = data[value_start:value_end] + + field_name = field_names.get(entry_type, f"Unknown_0x{entry_type:04x}") + + if entry_len_type == 1: # Short + value = struct.unpack(">H", value_data[:2])[0] + elif entry_len_type == 2: # Int + value = struct.unpack(">I", value_data[:4])[0] + elif entry_len_type == 3: # String/Blob + value = value_data.rstrip(b'\x00').decode('utf-8', errors='replace') + else: + value = value_data.hex() + + entries[field_name] = value + 
print(f" {field_name}: {value}") + + offset = value_end + + return entries +``` + +### Step 3: YARA Rule for Beacon Detection + +```python +import yara + +cobalt_strike_rule = """ +rule CobaltStrike_Beacon_Config { + meta: + description = "Detects Cobalt Strike beacon configuration" + author = "Malware Analysis Team" + date = "2025-01-01" + + strings: + // XOR'd config marker for CS 4.x (key 0x2e) + $config_v4 = { 2e 2f 2e 2c } + + // XOR'd config marker for CS 3.x (key 0x69) + $config_v3 = { 69 68 69 6b } + + // Common beacon strings + $str_pipe = "\\\\.\\pipe\\" ascii wide + $str_beacon = "beacon" ascii nocase + $str_sleeptime = "sleeptime" ascii nocase + + // Reflective loader pattern + $reflective = { 4D 5A 41 52 55 48 89 E5 } + + condition: + ($config_v4 or $config_v3) or + (2 of ($str_*) and $reflective) +} +""" + +def scan_for_beacons(filepath): + """Scan file with YARA rules for Cobalt Strike beacons.""" + rules = yara.compile(source=cobalt_strike_rule) + matches = rules.match(filepath) + + for match in matches: + print(f"[+] YARA Match: {match.rule}") + for string_match in match.strings: + offset = string_match.instances[0].offset + print(f" String: {string_match.identifier} at offset 0x{offset:x}") + + return matches +``` + +### Step 4: Network Traffic Correlation + +```python +from dissect.cobaltstrike.c2 import HttpC2Config + +def analyze_c2_profile(beacon_config): + """Analyze malleable C2 profile from beacon configuration.""" + print("\n[+] Malleable C2 Profile Analysis") + print("=" * 60) + + # HTTP GET configuration + get_verb = beacon_config.get("SETTING_C2_VERB_GET", "GET") + get_uri = beacon_config.get("SETTING_C2_REQUEST", "") + print(f"\n HTTP GET Request:") + print(f" Verb: {get_verb}") + print(f" URI: {get_uri}") + + # HTTP POST configuration + post_verb = beacon_config.get("SETTING_C2_VERB_POST", "POST") + post_uri = beacon_config.get("SETTING_C2_POSTREQ", "") + print(f"\n HTTP POST Request:") + print(f" Verb: {post_verb}") + print(f" URI: 
{post_uri}") + + # User Agent + ua = beacon_config.get("SETTING_USERAGENT", "") + print(f"\n User-Agent: {ua}") + + # Host header + host = beacon_config.get("SETTING_C2_HOSTHEADER", "") + print(f" Host Header: {host}") + + # Sleep and jitter for traffic pattern + sleep_ms = beacon_config.get("SETTING_SLEEPTIME", 60000) + jitter = beacon_config.get("SETTING_JITTER", 0) + print(f"\n Sleep Time: {sleep_ms}ms") + print(f" Jitter: {jitter}%") + + # Generate Suricata/Snort signatures + print(f"\n[+] Suggested Network Signatures:") + if ua: + print(f' alert http any any -> any any (msg:"CS Beacon UA"; ' + f'content:"{ua}"; http_user_agent; sid:1000001; rev:1;)') + if get_uri: + print(f' alert http any any -> any any (msg:"CS Beacon URI"; ' + f'content:"{get_uri}"; http_uri; sid:1000002; rev:1;)') +``` + +## Validation Criteria + +- Beacon configuration successfully extracted from PE file or memory dump +- C2 server domains/IPs correctly identified with port and protocol +- Malleable C2 profile parameters decoded showing HTTP transforms +- Watermark value extracted for attribution correlation +- Sleep time and jitter values match observed network beacon intervals +- YARA rules detect beacon in both packed and unpacked samples +- Network signatures generated from extracted C2 profile + +## References + +- [SentinelOne CobaltStrikeParser](https://github.com/Sentinel-One/CobaltStrikeParser) +- [dissect.cobaltstrike Library](https://github.com/fox-it/dissect.cobaltstrike) +- [SentinelLabs Beacon Configuration Analysis](https://www.sentinelone.com/labs/the-anatomy-of-an-apt-attack-and-cobaltstrike-beacons-encoded-configuration/) +- [Cobalt Strike Staging and Config Extraction](https://blog.securehat.co.uk/cobaltstrike/extracting-config-from-cobaltstrike-stager-shellcode) +- [MITRE ATT&CK - Cobalt Strike S0154](https://attack.mitre.org/software/S0154/) diff --git a/personas/_shared/skills/analyzing-cobalt-strike-beacon-configuration/assets/template.md 
b/personas/_shared/skills/analyzing-cobalt-strike-beacon-configuration/assets/template.md new file mode 100644 index 0000000..2f53170 --- /dev/null +++ b/personas/_shared/skills/analyzing-cobalt-strike-beacon-configuration/assets/template.md @@ -0,0 +1,95 @@ +# Cobalt Strike Beacon Analysis Report Template + +## Report Metadata +| Field | Value | +|-------|-------| +| Report ID | CS-BEACON-YYYY-NNNN | +| Date | YYYY-MM-DD | +| Sample Hash (SHA-256) | | +| Classification | TLP:AMBER | +| Analyst | | + +## Beacon Configuration Summary + +| Setting | Value | +|---------|-------| +| Beacon Type | HTTP / HTTPS / SMB / DNS | +| C2 Server(s) | | +| Port | | +| Sleep Time | ms | +| Jitter | % | +| User-Agent | | +| Watermark | | +| SpawnTo (x86) | | +| SpawnTo (x64) | | +| Named Pipe | | +| Host Header | | +| Crypto Scheme | | + +## C2 Infrastructure + +| Indicator | Type | Value | Context | +|-----------|------|-------|---------| +| C2 Domain | domain | | Primary callback | +| C2 IP | ip | | Resolved address | +| URI Path (GET) | uri | | Beacon check-in | +| URI Path (POST) | uri | | Data exfiltration | + +## Malleable C2 Profile + +### HTTP GET Configuration +| Parameter | Value | +|-----------|-------| +| URI | | +| Verb | | +| Headers | | +| Metadata Encoding | | + +### HTTP POST Configuration +| Parameter | Value | +|-----------|-------| +| URI | | +| Verb | | +| ID Encoding | | +| Output Encoding | | + +## Watermark Attribution + +| Watermark | Known Association | Confidence | +|-----------|------------------|------------| +| | Cracked / Licensed / Threat Actor | High/Med/Low | + +## Network Detection Signatures + +``` +# Suricata signature for beacon C2 traffic +alert http $HOME_NET any -> $EXTERNAL_NET any ( + msg:"Cobalt Strike Beacon C2 Communication"; + content:"[USER_AGENT]"; http_user_agent; + content:"[URI_PATH]"; http_uri; + sid:1000001; rev:1; +) +``` + +## YARA Detection Rule + +```yara +rule CobaltStrike_Beacon_[CAMPAIGN] { + meta: + description = 
"Detects Cobalt Strike beacon from [CAMPAIGN]" + hash = "[SHA256]" + strings: + $c2 = "[C2_DOMAIN]" ascii + $pipe = "[NAMED_PIPE]" ascii + $ua = "[USER_AGENT]" ascii + condition: + 2 of them +} +``` + +## Recommendations + +1. **Block**: Add C2 domains/IPs to firewall deny lists +2. **Hunt**: Search for named pipe and spawn-to process in endpoint logs +3. **Detect**: Deploy YARA and network signatures to detection stack +4. **Correlate**: Check watermark against threat intelligence databases diff --git a/personas/_shared/skills/analyzing-cobalt-strike-beacon-configuration/references/api-reference.md b/personas/_shared/skills/analyzing-cobalt-strike-beacon-configuration/references/api-reference.md new file mode 100644 index 0000000..f2e192e --- /dev/null +++ b/personas/_shared/skills/analyzing-cobalt-strike-beacon-configuration/references/api-reference.md @@ -0,0 +1,112 @@ +# API Reference: Cobalt Strike Beacon Configuration Analysis + +## Beacon Config TLV Format + +### Structure +``` +[Field ID: 2 bytes][Type: 2 bytes][Value: variable] +Type 1 = short (2 bytes), Type 2 = int (4 bytes), Type 3 = string/blob (2-byte length + data) +``` + +### XOR Encoding +| Version | XOR Key | +|---------|---------| +| CS 3.x | `0x69` | +| CS 4.x | `0x2E` | + +### Key Configuration Fields +| ID | Name | Description | +|----|------|-------------| +| 1 | BeaconType | 0=HTTP, 1=Hybrid, 2=SMB, 8=HTTPS | +| 2 | Port | C2 communication port | +| 3 | SleepTime | Beacon interval (ms) | +| 5 | Jitter | Random sleep variation (%) | +| 7 | PublicKey | RSA public key for encryption | +| 8 | C2Server | Command and control server(s) | +| 9 | UserAgent | HTTP User-Agent string | +| 10 | PostURI | POST callback URI | +| 37 | Watermark | License watermark (operator ID) | +| 54 | PipeName | Named pipe for SMB beacons | + +## 1768.py (Didier Stevens) - Config Extractor + +### Syntax +```bash +python 1768.py # Extract config +python 1768.py -j # JSON output +python 1768.py -r # Raw config dump +``` + 
+## CobaltStrikeParser (SentinelOne) + +### Syntax +```bash +python parse_beacon_config.py +python parse_beacon_config.py --json +``` + +### Output Fields +``` +BeaconType: HTTPS +Port: 443 +SleepTime: 60000 +Jitter: 37 +C2Server: update.microsoft-cdn.com,/api/v2 +UserAgent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) +Watermark: 305419896 +SpawnToX86: %windir%\syswow64\dllhost.exe +SpawnToX64: %windir%\sysnative\dllhost.exe +``` + +## JARM Fingerprinting + +### Cobalt Strike Default JARM +```bash +# Default CS JARM hash (pre-4.7) +07d14d16d21d21d07c42d41d00041d24a458a375eef0c576d23a7bab9a9fb1 + +# Scan with JARM +python jarm.py -p 443 +``` + +## Known Watermark Values +| Watermark | Attribution | +|-----------|------------| +| 0 | Trial/cracked version | +| 305419896 | Common cracked version | +| 1359593325 | Known threat actor toolkit | +| 1580103824 | Known APT usage | + +## Detection Signatures + +### Suricata +``` +alert http $HOME_NET any -> $EXTERNAL_NET any ( + msg:"ET MALWARE Cobalt Strike Beacon"; + content:"/submit.php"; http_uri; + content:"Cookie:"; http_header; + pcre:"/Cookie:\s[A-Za-z0-9+/=]{60,}/H"; + sid:2028591; rev:1;) +``` + +### YARA +```yara +rule CobaltStrike_Beacon { + strings: + $config_v3 = { 00 01 00 01 00 02 ?? ?? 
00 01 00 02 } + $magic = "MSSE-%d-server" + $pipe = "\\\\.\\pipe\\msagent_" + condition: + uint16(0) == 0x5A4D and any of them +} +``` + +## Malleable C2 Profile Elements +| Element | Description | +|---------|-------------| +| `http-get` | GET request profile (URI, headers, metadata transform) | +| `http-post` | POST request profile (URI, body transform) | +| `set sleeptime` | Default beacon interval | +| `set jitter` | Randomization percentage | +| `set useragent` | HTTP User-Agent | +| `set pipename` | SMB named pipe name | diff --git a/personas/_shared/skills/analyzing-cobalt-strike-beacon-configuration/references/standards.md b/personas/_shared/skills/analyzing-cobalt-strike-beacon-configuration/references/standards.md new file mode 100644 index 0000000..2c9c8ff --- /dev/null +++ b/personas/_shared/skills/analyzing-cobalt-strike-beacon-configuration/references/standards.md @@ -0,0 +1,94 @@ +# Standards and Frameworks Reference + +## Cobalt Strike Beacon Configuration Fields + +### Configuration TLV Types +| Type ID | Field Name | Data Type | Description | +|---------|-----------|-----------|-------------| +| 0x0001 | BeaconType | Short | 0=HTTP, 1=Hybrid HTTP/DNS, 8=HTTPS, 10=TCP Bind | +| 0x0002 | Port | Short | C2 communication port | +| 0x0003 | SleepTime | Int | Beacon callback interval in milliseconds | +| 0x0005 | Jitter | Short | Percentage of sleep time randomization (0-99) | +| 0x0008 | C2Server | String | Comma-separated C2 domains/IPs | +| 0x0009 | UserAgent | String | HTTP User-Agent header value | +| 0x000a | PostURI | String | URI for HTTP POST requests | +| 0x000d | SpawnTo_x86 | String | 32-bit process to spawn for post-ex | +| 0x000e | SpawnTo_x64 | String | 64-bit process to spawn for post-ex | +| 0x001a | Watermark | Int | License watermark identifier | +| 0x0024 | PipeName | String | Named pipe for SMB beacon | +| 0x001d | HostHeader | String | HTTP Host header value | +| 0x0032 | ProxyHostname | String | Proxy server address | + +### XOR 
Encoding Scheme +- **Cobalt Strike 3.x**: XOR key = 0x69 +- **Cobalt Strike 4.x**: XOR key = 0x2e +- Configuration blob size: 4096 bytes (typical) +- Encoding: Single-byte XOR across entire config blob + +### Stageless Beacon Structure +- PE with beacon code in .data section +- 4-byte XOR key applied to .data section content +- Configuration embedded after beacon code +- Reflective DLL loader prepended to beacon + +## MITRE ATT&CK Mappings + +### Cobalt Strike Techniques (S0154) +| Technique | ID | Description | +|-----------|-----|------------| +| Application Layer Protocol | T1071.001 | HTTP/HTTPS C2 communication | +| Encrypted Channel | T1573.002 | AES-256 encrypted C2 | +| Ingress Tool Transfer | T1105 | Download additional payloads | +| Process Injection | T1055 | Inject into spawned processes | +| Named Pipes | T1570 | SMB beacon lateral movement | +| Service Execution | T1569.002 | PSExec-style lateral movement | +| Reflective Code Loading | T1620 | In-memory beacon loading | + +## Malleable C2 Profile Structure + +### HTTP GET Block +``` +http-get { + set uri "/path"; + client { + header "Accept" "text/html"; + metadata { + base64url; + prepend "session="; + header "Cookie"; + } + } + server { + header "Content-Type" "text/html"; + output { + print; + } + } +} +``` + +### HTTP POST Block +``` +http-post { + set uri "/submit"; + client { + id { + uri-append; + } + output { + base64; + print; + } + } + server { + output { + print; + } + } +} +``` + +## References +- [Cobalt Strike Documentation](https://hstechdocs.helpsystems.com/manuals/cobaltstrike/) +- [Malleable C2 Profile Reference](https://hstechdocs.helpsystems.com/manuals/cobaltstrike/current/userguide/content/topics/malleable-c2_main.htm) +- [MITRE ATT&CK Cobalt Strike](https://attack.mitre.org/software/S0154/) diff --git a/personas/_shared/skills/analyzing-cobalt-strike-beacon-configuration/references/workflows.md 
b/personas/_shared/skills/analyzing-cobalt-strike-beacon-configuration/references/workflows.md new file mode 100644 index 0000000..c0ccd1b --- /dev/null +++ b/personas/_shared/skills/analyzing-cobalt-strike-beacon-configuration/references/workflows.md @@ -0,0 +1,72 @@ +# Cobalt Strike Beacon Analysis Workflows + +## Workflow 1: PE File Configuration Extraction + +``` +[Suspicious PE] --> [Unpack if packed] --> [Locate .data section] --> [XOR Decrypt] + | + v + [Parse TLV Config] + | + v + [Extract C2 Indicators] +``` + +### Steps: +1. **Triage**: Identify file as potential Cobalt Strike beacon via YARA or AV detection +2. **Unpacking**: If packed, unpack using appropriate tool (UPX, custom unpacker) +3. **Section Analysis**: Locate .data section containing XOR'd beacon code +4. **XOR Key Discovery**: Try known keys (0x2e, 0x69) or brute-force 4-byte key +5. **Config Parsing**: Parse decrypted TLV entries for C2 and operational settings +6. **IOC Extraction**: Extract domains, IPs, URIs, user agents, watermarks + +## Workflow 2: Memory Dump Beacon Extraction + +``` +[Memory Dump] --> [Volatility3 malfind] --> [Dump Injected Regions] --> [Parse Config] + | + v + [C2 Infrastructure Map] +``` + +### Steps: +1. **Acquisition**: Capture memory dump from compromised system +2. **Process Scan**: Use Volatility3 to identify suspicious processes +3. **Injection Detection**: Use malfind to find RWX memory regions +4. **Region Extraction**: Dump injected memory regions to files +5. **Config Search**: Scan dumps for beacon configuration signatures +6. **Infrastructure Mapping**: Correlate extracted C2 with network logs + +## Workflow 3: Watermark Attribution + +``` +[Multiple Beacons] --> [Extract Watermarks] --> [Cluster by Watermark] --> [Attribution] + | + v + [Campaign Correlation] +``` + +### Steps: +1. **Collection**: Gather beacon samples from incident or threat intel feeds +2. **Watermark Extraction**: Extract watermark value from each sample +3. 
**Database Lookup**: Check watermark against known databases +4. **Clustering**: Group beacons sharing the same watermark +5. **Infrastructure Overlap**: Correlate C2 infrastructure across cluster +6. **Attribution Assessment**: Link to known threat actor or cracked license + +## Workflow 4: C2 Traffic Detection + +``` +[Beacon Config] --> [Extract C2 Profile] --> [Generate Signatures] --> [Deploy to NIDS] + | + v + [Monitor Network Traffic] +``` + +### Steps: +1. **Profile Extraction**: Parse malleable C2 profile from beacon config +2. **Pattern Identification**: Identify unique HTTP headers, URIs, and encoding +3. **Signature Creation**: Write Suricata/Snort rules matching C2 patterns +4. **Deployment**: Deploy signatures to network detection infrastructure +5. **Validation**: Test signatures against captured beacon traffic +6. **Monitoring**: Alert on matching network flows for active beacons diff --git a/personas/_shared/skills/analyzing-cobalt-strike-beacon-configuration/scripts/agent.py b/personas/_shared/skills/analyzing-cobalt-strike-beacon-configuration/scripts/agent.py new file mode 100644 index 0000000..4df373c --- /dev/null +++ b/personas/_shared/skills/analyzing-cobalt-strike-beacon-configuration/scripts/agent.py @@ -0,0 +1,239 @@ +#!/usr/bin/env python3 +"""Cobalt Strike beacon configuration extraction and analysis agent. + +Extracts C2 configuration from beacon payloads including server addresses, +communication settings, malleable C2 profile details, and watermark values. 
+""" + +import struct +import os +import sys +import hashlib +from collections import OrderedDict + +# Cobalt Strike beacon configuration field IDs (Type-Length-Value format) +BEACON_CONFIG_FIELDS = { + 1: ("BeaconType", "short"), + 2: ("Port", "short"), + 3: ("SleepTime", "int"), + 4: ("MaxGetSize", "int"), + 5: ("Jitter", "short"), + 7: ("PublicKey", "bytes"), + 8: ("C2Server", "str"), + 9: ("UserAgent", "str"), + 10: ("PostURI", "str"), + 11: ("Malleable_C2_Instructions", "bytes"), + 12: ("HttpGet_Metadata", "bytes"), + 13: ("HttpPost_Metadata", "bytes"), + 14: ("SpawnToX86", "str"), + 15: ("SpawnToX64", "str"), + 19: ("CryptoScheme", "short"), + 26: ("GetVerb", "str"), + 27: ("PostVerb", "str"), + 28: ("HttpPostChunk", "int"), + 29: ("Spawnto_x86", "str"), + 30: ("Spawnto_x64", "str"), + 31: ("CryptoScheme2", "str"), + 37: ("Watermark", "int"), + 38: ("StageCleanup", "short"), + 39: ("CFGCaution", "short"), + 43: ("DNS_Idle", "int"), + 44: ("DNS_Sleep", "int"), + 50: ("HostHeader", "str"), + 54: ("PipeName", "str"), +} + +BEACON_TYPES = {0: "HTTP", 1: "Hybrid HTTP/DNS", 2: "SMB", 4: "TCP", 8: "HTTPS", 16: "DNS over HTTPS"} + +XOR_KEY_V3 = 0x69 +XOR_KEY_V4 = 0x2E + + +def compute_hash(filepath): + """Compute SHA-256 hash of file.""" + sha256 = hashlib.sha256() + with open(filepath, "rb") as f: + for chunk in iter(lambda: f.read(65536), b""): + sha256.update(chunk) + return sha256.hexdigest() + + +def find_config_offset(data): + """Find the beacon configuration blob in PE data or shellcode.""" + # Look for XOR-encoded config patterns + for xor_key in [XOR_KEY_V3, XOR_KEY_V4]: + # Config starts with 0x0001 (BeaconType field ID) XOR-encoded + encoded_marker = bytes([0x00 ^ xor_key, 0x01 ^ xor_key, 0x00 ^ xor_key, 0x01 ^ xor_key]) + offset = data.find(encoded_marker) + if offset != -1: + return offset, xor_key + # Try unencoded + for offset in range(len(data) - 100): + if data[offset:offset+4] == b"\x00\x01\x00\x01": + return offset, None + return -1, None + + +def 
xor_decode(data, key): + """XOR decode data with single byte key.""" + if key is None: + return data + return bytes(b ^ key for b in data) + + +def parse_config_field(data, offset): + """Parse a single TLV config field.""" + if offset + 6 > len(data): + return None, None, None, offset + field_id = struct.unpack_from(">H", data, offset)[0] + field_type = struct.unpack_from(">H", data, offset + 2)[0] + if field_type == 1: # short + value = struct.unpack_from(">H", data, offset + 4)[0] + return field_id, "short", value, offset + 6 + elif field_type == 2: # int + value = struct.unpack_from(">I", data, offset + 4)[0] + return field_id, "int", value, offset + 8 + elif field_type == 3: # str/bytes + length = struct.unpack_from(">H", data, offset + 4)[0] + if offset + 6 + length > len(data): + return None, None, None, offset + value = data[offset + 6:offset + 6 + length] + return field_id, "str", value, offset + 6 + length + return None, None, None, offset + 2 + + +def extract_beacon_config(filepath): + """Extract and parse Cobalt Strike beacon configuration.""" + with open(filepath, "rb") as f: + data = f.read() + + config_offset, xor_key = find_config_offset(data) + if config_offset == -1: + return {"error": "No beacon configuration found", "file": filepath} + + config_data = xor_decode(data[config_offset:config_offset + 4096], xor_key) + config = OrderedDict() + config["_meta"] = { + "config_offset": f"0x{config_offset:08X}", + "xor_key": f"0x{xor_key:02X}" if xor_key else "none", + "version_guess": "4.x" if xor_key == XOR_KEY_V4 else "3.x" if xor_key == XOR_KEY_V3 else "unknown", + } + + offset = 0 + max_fields = 100 + parsed = 0 + while offset < len(config_data) - 4 and parsed < max_fields: + field_id, field_type, value, new_offset = parse_config_field(config_data, offset) + if field_id is None or new_offset == offset: + break + offset = new_offset + parsed += 1 + + field_info = BEACON_CONFIG_FIELDS.get(field_id) + if field_info: + field_name, expected_type = 
field_info + if isinstance(value, bytes): + try: + str_value = value.rstrip(b"\x00").decode("utf-8", errors="replace") + config[field_name] = str_value + except Exception: + config[field_name] = value.hex()[:100] + elif field_id == 1: + config[field_name] = BEACON_TYPES.get(value, f"Unknown({value})") + else: + config[field_name] = value + + return config + + +def extract_c2_indicators(config): + """Extract C2 indicators from parsed config for threat intelligence.""" + indicators = {"c2_servers": [], "user_agents": [], "uris": [], + "pipes": [], "watermark": None, "dns": []} + c2 = config.get("C2Server", "") + if c2: + for server in c2.split(","): + server = server.strip().rstrip("/") + if server: + indicators["c2_servers"].append(server) + ua = config.get("UserAgent", "") + if ua: + indicators["user_agents"].append(ua) + for key in ["PostURI"]: + uri = config.get(key, "") + if uri: + indicators["uris"].append(uri) + pipe = config.get("PipeName", "") + if pipe: + indicators["pipes"].append(pipe) + wm = config.get("Watermark") + if wm: + indicators["watermark"] = wm + return indicators + + +def assess_operator_opsec(config): + """Assess operator OPSEC based on beacon configuration.""" + findings = [] + sleep = config.get("SleepTime", 0) + jitter = config.get("Jitter", 0) + if sleep < 30000: + findings.append({"level": "INFO", "detail": f"Low sleep time: {sleep}ms - high beacon frequency"}) + if jitter == 0: + findings.append({"level": "WARN", "detail": "No jitter configured - predictable beacon interval"}) + ua = config.get("UserAgent", "") + if "Mozilla" not in ua and ua: + findings.append({"level": "WARN", "detail": f"Non-standard User-Agent: {ua[:60]}"}) + spawn86 = config.get("SpawnToX86", config.get("Spawnto_x86", "")) + if "rundll32" in spawn86.lower(): + findings.append({"level": "INFO", "detail": "Default spawn-to process (rundll32) - easy to detect"}) + cleanup = config.get("StageCleanup", 0) + if cleanup == 0: + findings.append({"level": "INFO", "detail": 
"Stage cleanup disabled - beacon stub remains in memory"}) + return findings + + +if __name__ == "__main__": + print("=" * 60) + print("Cobalt Strike Beacon Configuration Extractor") + print("C2 extraction, watermark analysis, OPSEC assessment") + print("=" * 60) + + target = sys.argv[1] if len(sys.argv) > 1 else None + + if not target or not os.path.exists(target): + print("\n[DEMO] Usage: python agent.py ") + print(" Extracts: C2 servers, sleep/jitter, watermark, malleable profile") + sys.exit(0) + + print(f"\n[*] Analyzing: {target}") + print(f"[*] SHA-256: {compute_hash(target)}") + print(f"[*] Size: {os.path.getsize(target)} bytes") + + config = extract_beacon_config(target) + + if "error" in config: + print(f"\n[!] {config['error']}") + sys.exit(1) + + print("\n--- Beacon Configuration ---") + for key, value in config.items(): + if key == "_meta": + for mk, mv in value.items(): + print(f" {mk}: {mv}") + else: + print(f" {key}: {value}") + + indicators = extract_c2_indicators(config) + print("\n--- C2 Indicators ---") + for c2 in indicators["c2_servers"]: + print(f" [C2] {c2}") + if indicators["watermark"]: + print(f" [Watermark] {indicators['watermark']}") + for pipe in indicators["pipes"]: + print(f" [Pipe] {pipe}") + + opsec = assess_operator_opsec(config) + print("\n--- Operator OPSEC Assessment ---") + for f in opsec: + print(f" [{f['level']}] {f['detail']}") diff --git a/personas/_shared/skills/analyzing-cobalt-strike-beacon-configuration/scripts/process.py b/personas/_shared/skills/analyzing-cobalt-strike-beacon-configuration/scripts/process.py new file mode 100644 index 0000000..43e3f66 --- /dev/null +++ b/personas/_shared/skills/analyzing-cobalt-strike-beacon-configuration/scripts/process.py @@ -0,0 +1,337 @@ +#!/usr/bin/env python3 +""" +Cobalt Strike Beacon Configuration Analyzer + +Extracts and analyzes beacon configurations from PE files, shellcode, +and memory dumps using dissect.cobaltstrike and manual parsing. 
+ +Requirements: + pip install dissect.cobaltstrike pefile yara-python + +Usage: + python process.py --file beacon.exe --output report.json + python process.py --file memdump.bin --scan-memory + python process.py --directory ./samples --batch +""" + +import argparse +import json +import os +import struct +import sys +from collections import defaultdict +from datetime import datetime +from pathlib import Path + +try: + from dissect.cobaltstrike.beacon import BeaconConfig +except ImportError: + print("ERROR: dissect.cobaltstrike not installed.") + print("Run: pip install dissect.cobaltstrike") + sys.exit(1) + + +# TLV field type mapping +TLV_FIELDS = { + 0x0001: ("BeaconType", "short"), + 0x0002: ("Port", "short"), + 0x0003: ("SleepTime", "int"), + 0x0004: ("MaxGetSize", "int"), + 0x0005: ("Jitter", "short"), + 0x0006: ("MaxDNS", "short"), + 0x0008: ("C2Server", "str"), + 0x0009: ("UserAgent", "str"), + 0x000a: ("PostURI", "str"), + 0x000b: ("Malleable_C2_Instructions", "blob"), + 0x000d: ("SpawnTo_x86", "str"), + 0x000e: ("SpawnTo_x64", "str"), + 0x000f: ("CryptoScheme", "short"), + 0x001a: ("Watermark", "int"), + 0x001d: ("HostHeader", "str"), + 0x0024: ("PipeName", "str"), + 0x0025: ("Year", "short"), + 0x0026: ("Month", "short"), + 0x0027: ("Day", "short"), + 0x002c: ("ProxyHostname", "str"), + 0x002d: ("ProxyUsername", "str"), + 0x002e: ("ProxyPassword", "str"), +} + +BEACON_TYPES = { + 0: "HTTP", + 1: "Hybrid HTTP/DNS", + 2: "SMB", + 4: "TCP", + 8: "HTTPS", + 10: "TCP Bind", + 14: "External C2", +} + + +class BeaconAnalyzer: + """Analyze Cobalt Strike beacon configurations.""" + + def __init__(self): + self.results = [] + + def analyze_file(self, filepath): + """Extract beacon config from a file.""" + filepath = Path(filepath) + if not filepath.exists(): + print(f"[-] File not found: {filepath}") + return None + + print(f"[*] Analyzing: {filepath}") + + # Try dissect.cobaltstrike first + result = self._extract_with_dissect(filepath) + + # Fall back to manual 
extraction + if not result: + result = self._extract_manual(filepath) + + if result: + result["source_file"] = str(filepath) + result["analysis_time"] = datetime.now().isoformat() + self.results.append(result) + + return result + + def _extract_with_dissect(self, filepath): + """Extract config using dissect.cobaltstrike library.""" + try: + configs = list(BeaconConfig.from_path(filepath)) + if not configs: + return None + + config = configs[0] + settings = config.as_dict() + + result = { + "method": "dissect.cobaltstrike", + "config": {}, + "indicators": {}, + } + + for key, value in settings.items(): + if value is not None: + result["config"][key] = str(value) + + result["indicators"] = self._extract_indicators(settings) + return result + + except Exception as e: + print(f" [!] dissect extraction failed: {e}") + return None + + def _extract_manual(self, filepath): + """Manual XOR-based config extraction.""" + try: + with open(filepath, "rb") as f: + data = f.read() + except Exception as e: + print(f" [!] 
Read failed: {e}") + return None + + for xor_key in [0x2e, 0x69]: + # Search for XOR'd config start marker + magic = bytes([0x00 ^ xor_key, 0x01 ^ xor_key, + 0x00 ^ xor_key, 0x02 ^ xor_key]) + + offset = data.find(magic) + if offset == -1: + continue + + print(f" [+] Config found at 0x{offset:x} (XOR key: 0x{xor_key:02x})") + + config_blob = data[offset:offset + 4096] + decrypted = bytes([b ^ xor_key for b in config_blob]) + + entries = self._parse_tlv(decrypted) + if entries: + return { + "method": "manual_xor", + "xor_key": f"0x{xor_key:02x}", + "config_offset": f"0x{offset:x}", + "config": entries, + "indicators": self._extract_indicators(entries), + } + + return None + + def _parse_tlv(self, data): + """Parse TLV configuration entries.""" + entries = {} + offset = 0 + + while offset + 6 <= len(data): + try: + entry_type = struct.unpack(">H", data[offset:offset+2])[0] + data_type = struct.unpack(">H", data[offset+2:offset+4])[0] + entry_len = struct.unpack(">H", data[offset+4:offset+6])[0] + except struct.error: + break + + if entry_type == 0 or entry_len > 4096: + break + + value_data = data[offset+6:offset+6+entry_len] + field_info = TLV_FIELDS.get(entry_type) + + if field_info: + field_name, expected_type = field_info + else: + field_name = f"Unknown_0x{entry_type:04x}" + expected_type = "blob" + + if data_type == 1 and len(value_data) >= 2: + value = struct.unpack(">H", value_data[:2])[0] + elif data_type == 2 and len(value_data) >= 4: + value = struct.unpack(">I", value_data[:4])[0] + elif data_type == 3: + value = value_data.rstrip(b'\x00').decode('utf-8', errors='replace') + else: + value = value_data.hex() + + # Resolve beacon type names + if field_name == "BeaconType" and isinstance(value, int): + value = BEACON_TYPES.get(value, f"Unknown ({value})") + + entries[field_name] = value + offset += 6 + entry_len + + return entries + + def _extract_indicators(self, config): + """Extract IOCs from parsed configuration.""" + indicators = { + "c2_servers": [], + 
"user_agent": "", + "named_pipes": [], + "spawn_processes": [], + "watermark": "", + "beacon_type": "", + "sleep_time_ms": 0, + "jitter_pct": 0, + } + + # Handle both dissect dict keys and manual parse keys + c2_keys = ["SETTING_DOMAINS", "C2Server"] + for key in c2_keys: + domains = config.get(key, "") + if domains: + for d in str(domains).split(","): + d = d.strip().rstrip("/") + if d: + indicators["c2_servers"].append(d) + + ua_keys = ["SETTING_USERAGENT", "UserAgent"] + for key in ua_keys: + ua = config.get(key, "") + if ua: + indicators["user_agent"] = str(ua) + + pipe_keys = ["SETTING_PIPENAME", "PipeName"] + for key in pipe_keys: + pipe = config.get(key, "") + if pipe: + indicators["named_pipes"].append(str(pipe)) + + spawn_keys = [ + ("SETTING_SPAWNTO_X86", "SpawnTo_x86"), + ("SETTING_SPAWNTO_X64", "SpawnTo_x64"), + ] + for dissect_key, manual_key in spawn_keys: + for key in [dissect_key, manual_key]: + proc = config.get(key, "") + if proc: + indicators["spawn_processes"].append(str(proc)) + + wm_keys = ["SETTING_WATERMARK", "Watermark"] + for key in wm_keys: + wm = config.get(key, "") + if wm: + indicators["watermark"] = str(wm) + + return indicators + + def batch_analyze(self, directory): + """Analyze all files in a directory.""" + directory = Path(directory) + extensions = {".exe", ".dll", ".bin", ".dmp", ".raw"} + + for filepath in directory.rglob("*"): + if filepath.suffix.lower() in extensions: + self.analyze_file(filepath) + + return self.results + + def cluster_by_watermark(self): + """Cluster analyzed beacons by watermark.""" + clusters = defaultdict(list) + + for result in self.results: + wm = result.get("indicators", {}).get("watermark", "unknown") + clusters[wm].append(result.get("source_file", "unknown")) + + return dict(clusters) + + def generate_report(self, output_path=None): + """Generate JSON analysis report.""" + report = { + "analysis_date": datetime.now().isoformat(), + "total_beacons": len(self.results), + "watermark_clusters": 
self.cluster_by_watermark(), + "all_c2_servers": list(set( + server + for r in self.results + for server in r.get("indicators", {}).get("c2_servers", []) + )), + "results": self.results, + } + + if output_path: + with open(output_path, "w") as f: + json.dump(report, f, indent=2, default=str) + print(f"[+] Report saved to {output_path}") + + return report + + +def main(): + parser = argparse.ArgumentParser( + description="Cobalt Strike Beacon Configuration Analyzer" + ) + parser.add_argument("--file", help="Single file to analyze") + parser.add_argument("--directory", help="Directory for batch analysis") + parser.add_argument("--output", default="beacon_report.json", + help="Output report path") + parser.add_argument("--scan-memory", action="store_true", + help="Treat input as raw memory dump") + parser.add_argument("--batch", action="store_true", + help="Batch analyze directory") + + args = parser.parse_args() + analyzer = BeaconAnalyzer() + + if args.file: + result = analyzer.analyze_file(args.file) + if result: + print(json.dumps(result, indent=2, default=str)) + + elif args.directory and args.batch: + results = analyzer.batch_analyze(args.directory) + print(f"\n[+] Analyzed {len(results)} beacons") + + else: + parser.print_help() + sys.exit(1) + + report = analyzer.generate_report(args.output) + print(f"\n[+] Total C2 servers found: {len(report['all_c2_servers'])}") + for server in report["all_c2_servers"]: + print(f" {server}") + + +if __name__ == "__main__": + main() diff --git a/personas/_shared/skills/analyzing-cobaltstrike-malleable-c2-profiles/LICENSE b/personas/_shared/skills/analyzing-cobaltstrike-malleable-c2-profiles/LICENSE new file mode 100644 index 0000000..d885118 --- /dev/null +++ b/personas/_shared/skills/analyzing-cobaltstrike-malleable-c2-profiles/LICENSE @@ -0,0 +1,201 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to the Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by the Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding any notices that do not + pertain to any part of the Derivative Works, in at least one + of the 
following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. Please do not remove or change + the license header comment from a contributed file except when + necessary. + + Copyright 2026 mukul975 + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/personas/_shared/skills/analyzing-cobaltstrike-malleable-c2-profiles/SKILL.md b/personas/_shared/skills/analyzing-cobaltstrike-malleable-c2-profiles/SKILL.md new file mode 100644 index 0000000..31f10c0 --- /dev/null +++ b/personas/_shared/skills/analyzing-cobaltstrike-malleable-c2-profiles/SKILL.md @@ -0,0 +1,61 @@ +--- +name: analyzing-cobaltstrike-malleable-c2-profiles +description: Parse and analyze Cobalt Strike Malleable C2 profiles using dissect.cobaltstrike and pyMalleableC2 to extract + C2 indicators, detect evasion techniques, and generate network detection signatures. 
+domain: cybersecurity +subdomain: malware-analysis +tags: +- cobalt-strike +- malleable-c2 +- c2-detection +- beacon-analysis +- network-signatures +- threat-hunting +- red-team-tools +version: '1.0' +author: mahipal +license: Apache-2.0 +nist_csf: +- DE.AE-02 +- RS.AN-03 +- ID.RA-01 +- DE.CM-01 +--- +# Analyzing CobaltStrike Malleable C2 Profiles + +## Overview + +Cobalt Strike Malleable C2 profiles are domain-specific language scripts that customize how Beacon communicates with the team server, defining HTTP request/response transformations, sleep intervals, jitter values, user agents, URI paths, and process injection behavior. Threat actors use malleable profiles to disguise C2 traffic as legitimate services (Amazon, Google, Slack). Analyzing these profiles reveals network indicators for detection: URI patterns, HTTP headers, POST/GET transforms, DNS settings, and process injection techniques. The `dissect.cobaltstrike` library can parse both profile files and extract configurations from beacon payloads, while `pyMalleableC2` provides AST-based parsing using Lark grammar for programmatic profile manipulation and validation. + + +## When to Use + +- When investigating security incidents that require analyzing cobaltstrike malleable c2 profiles +- When building detection rules or threat hunting queries for this domain +- When SOC analysts need structured procedures for this analysis type +- When validating security monitoring coverage for related attack techniques + +## Prerequisites + +- Python 3.9+ with `dissect.cobaltstrike` and/or `pyMalleableC2` +- Sample Malleable C2 profiles (available from public repositories) +- Understanding of HTTP protocol and Cobalt Strike beacon communication model +- Network monitoring tools (Suricata/Snort) for signature deployment +- PCAP analysis tools for traffic validation + +## Steps + +1. Install libraries: `pip install dissect.cobaltstrike` or `pip install pyMalleableC2` +2. 
Parse profile with `C2Profile.from_path("profile.profile")` +3. Extract HTTP GET/POST block configurations (URIs, headers, parameters) +4. Identify user agent strings and spoof targets +5. Extract sleep time, jitter percentage, and DNS beacon settings +6. Analyze process injection settings (spawn-to, allocation technique) +7. Generate Suricata/Snort signatures from extracted network indicators +8. Compare profile against known threat actor profile collections +9. Extract staging URIs and payload delivery mechanisms +10. Produce detection report with IOCs and recommended network signatures + +## Expected Output + +A JSON report containing extracted C2 URIs, HTTP headers, user agents, sleep/jitter settings, process injection config, spawned process paths, DNS settings, and generated Suricata-compatible detection rules. diff --git a/personas/_shared/skills/analyzing-cobaltstrike-malleable-c2-profiles/references/api-reference.md b/personas/_shared/skills/analyzing-cobaltstrike-malleable-c2-profiles/references/api-reference.md new file mode 100644 index 0000000..420dafa --- /dev/null +++ b/personas/_shared/skills/analyzing-cobaltstrike-malleable-c2-profiles/references/api-reference.md @@ -0,0 +1,95 @@ +# CobaltStrike Malleable C2 Profile Analysis API Reference + +## Installation + +```bash +pip install dissect.cobaltstrike +pip install 'dissect.cobaltstrike[full]' # With PCAP support +pip install pyMalleableC2 # Alternative parser +``` + +## dissect.cobaltstrike API + +### Parse Beacon Configuration +```python +from dissect.cobaltstrike.beacon import BeaconConfig + +bconfig = BeaconConfig.from_path("beacon.bin") +print(hex(bconfig.watermark)) # 0x5109bf6d +print(bconfig.protocol) # https +print(bconfig.version) # BeaconVersion(...) 
+print(bconfig.settings) # Full config dict +``` + +### Parse Malleable C2 Profile +```python +from dissect.cobaltstrike.c2profile import C2Profile + +profile = C2Profile.from_path("amazon.profile") +config = profile.as_dict() +print(config["useragent"]) +print(config["http-get.uri"]) +print(config["sleeptime"]) +``` + +### PCAP Analysis +```bash +# Extract beacons from PCAP +beacon-pcap --extract-beacons traffic.pcap + +# Decrypt traffic with private key +beacon-pcap -p team_server.pem traffic.pcap --beacon beacon.bin +``` + +## pyMalleableC2 API + +```python +from malleableC2 import Profile + +profile = Profile.from_file("amazon.profile") +print(profile.sleeptime) +print(profile.useragent) +print(profile.http_get.uri) +print(profile.http_post.uri) +``` + +## Key Profile Settings + +| Setting | Description | Detection Value | +|---------|-------------|-----------------| +| `sleeptime` | Callback interval (ms) | Low values = aggressive beaconing | +| `jitter` | Sleep randomization % | Timing analysis evasion | +| `useragent` | HTTP User-Agent string | Network signature | +| `http-get.uri` | GET request URI path | URI-based detection | +| `http-post.uri` | POST request URI path | URI-based detection | +| `spawnto_x86` | 32-bit spawn process | Process creation detection | +| `spawnto_x64` | 64-bit spawn process | Process creation detection | +| `pipename` | Named pipe pattern | Named pipe monitoring | +| `dns_idle` | DNS idle IP address | DNS beacon detection | +| `watermark` | License watermark | Operator attribution | + +## Suricata Rule Format + +``` +alert http $HOME_NET any -> $EXTERNAL_NET any ( + msg:"MALWARE CobaltStrike C2 URI"; + flow:established,to_server; + http.uri; content:"/api/v1/status"; + http.header; content:"User-Agent: Mozilla/5.0"; + sid:9000001; rev:1; +) +``` + +## CLI Usage + +```bash +python agent.py --input profile.profile --output report.json +python agent.py --input parsed_config.json --output report.json +``` + +## References + +- 
dissect.cobaltstrike: https://github.com/fox-it/dissect.cobaltstrike +- pyMalleableC2: https://github.com/byt3bl33d3r/pyMalleableC2 +- Unit42 Analysis: https://unit42.paloaltonetworks.com/cobalt-strike-malleable-c2-profile/ +- Config Extractor: https://github.com/strozfriedberg/cobaltstrike-config-extractor diff --git a/personas/_shared/skills/analyzing-cobaltstrike-malleable-c2-profiles/scripts/agent.py b/personas/_shared/skills/analyzing-cobaltstrike-malleable-c2-profiles/scripts/agent.py new file mode 100644 index 0000000..582d5d3 --- /dev/null +++ b/personas/_shared/skills/analyzing-cobaltstrike-malleable-c2-profiles/scripts/agent.py @@ -0,0 +1,234 @@ +#!/usr/bin/env python3 +"""CobaltStrike Malleable C2 Profile Analyzer - parses profiles to extract C2 indicators, detection signatures, and evasion techniques""" +# For authorized security research and defensive analysis only + +import argparse +import json +import re +from collections import Counter +from datetime import datetime +from pathlib import Path + +try: + from dissect.cobaltstrike.c2profile import C2Profile + HAS_DISSECT = True +except ImportError: + HAS_DISSECT = False + +RUN_KEY_SUSPICIOUS = ["powershell", "cmd.exe", "mshta", "rundll32", "regsvr32", "wscript", "cscript"] + +KNOWN_SPOOF_TARGETS = { + "amazon": "Amazon CDN impersonation", + "google": "Google services impersonation", + "microsoft": "Microsoft services impersonation", + "slack": "Slack API impersonation", + "cloudfront": "CloudFront CDN impersonation", + "jquery": "jQuery CDN impersonation", + "outlook": "Outlook Web impersonation", + "onedrive": "OneDrive impersonation", +} + + +def load_data(path): + return json.loads(Path(path).read_text(encoding="utf-8")) + + +def parse_profile_with_dissect(profile_path): + """Parse a .profile file using dissect.cobaltstrike C2Profile.""" + if not HAS_DISSECT: + return None + profile = C2Profile.from_path(profile_path) + return profile.as_dict() + + +def parse_profile_regex(content): + """Regex-based 
parser for malleable C2 profile when dissect is unavailable.""" + config = {} + set_pattern = re.compile(r'set\s+(\w+)\s+"([^"]*)"', re.MULTILINE) + for match in set_pattern.finditer(content): + config[match.group(1)] = match.group(2) + block_pattern = re.compile(r'(http-get|http-post|http-stager|https-certificate|dns-beacon|process-inject|post-ex)\s*\{', re.MULTILINE) + for match in block_pattern.finditer(content): + config.setdefault("blocks", []).append(match.group(1)) + uri_pattern = re.compile(r'set\s+uri\s+"([^"]*)"', re.MULTILINE) + for match in uri_pattern.finditer(content): + config.setdefault("uris", []).append(match.group(1)) + header_pattern = re.compile(r'header\s+"([^"]+)"\s+"([^"]*)"', re.MULTILINE) + for match in header_pattern.finditer(content): + config.setdefault("headers", []).append({"name": match.group(1), "value": match.group(2)}) + spawn_pattern = re.compile(r'set\s+spawnto_x(?:86|64)\s+"([^"]*)"', re.MULTILINE) + for match in spawn_pattern.finditer(content): + config.setdefault("spawn_to", []).append(match.group(1)) + return config + + +def analyze_profile(config): + """Analyze parsed profile configuration for detection opportunities.""" + findings = [] + ua = config.get("useragent", config.get("user_agent", "")) + if ua: + findings.append({ + "type": "user_agent_identified", + "severity": "info", + "resource": "http-config", + "detail": f"User-Agent: {ua[:100]}", + "indicator": ua, + }) + for target, desc in KNOWN_SPOOF_TARGETS.items(): + if target.lower() in ua.lower(): + findings.append({ + "type": "service_impersonation", + "severity": "medium", + "resource": "user-agent", + "detail": f"{desc} detected in User-Agent string", + }) + sleeptime = config.get("sleeptime", config.get("sleep_time", "")) + jitter = config.get("jitter", "") + if sleeptime: + try: + sleep_ms = int(sleeptime) + if sleep_ms < 1000: + findings.append({ + "type": "aggressive_beaconing", + "severity": "high", + "resource": "beacon-config", + "detail": f"Very low sleep 
time: {sleep_ms}ms - aggressive C2 callback rate", + }) + except ValueError: + pass + uris = config.get("uris", []) + for uri in uris: + findings.append({ + "type": "c2_uri", + "severity": "high", + "resource": "http-config", + "detail": f"C2 URI path: {uri}", + "indicator": uri, + }) + headers = config.get("headers", []) + for h in headers: + name = h.get("name", "") if isinstance(h, dict) else str(h) + value = h.get("value", "") if isinstance(h, dict) else "" + if name.lower() in ("host", "cookie", "authorization"): + findings.append({ + "type": "c2_header", + "severity": "medium", + "resource": "http-config", + "detail": f"Custom header: {name}: {value[:60]}", + }) + spawn_to = config.get("spawn_to", config.get("spawnto_x86", [])) + if isinstance(spawn_to, str): + spawn_to = [spawn_to] + for proc in spawn_to: + findings.append({ + "type": "spawn_to_process", + "severity": "high", + "resource": "process-inject", + "detail": f"Beacon spawns to: {proc}", + "indicator": proc, + }) + pipename = config.get("pipename", config.get("pipename_stager", "")) + if pipename: + findings.append({ + "type": "named_pipe", + "severity": "high", + "resource": "process-inject", + "detail": f"Named pipe: {pipename}", + "indicator": pipename, + }) + dns_idle = config.get("dns_idle", "") + if dns_idle: + findings.append({ + "type": "dns_beacon_config", + "severity": "medium", + "resource": "dns-beacon", + "detail": f"DNS idle IP: {dns_idle}", + }) + watermark = config.get("watermark", "") + if watermark: + findings.append({ + "type": "watermark", + "severity": "info", + "resource": "beacon-config", + "detail": f"Beacon watermark: {watermark}", + }) + return findings + + +def generate_suricata_rules(findings, sid_start=9000001): + """Generate Suricata rules from extracted indicators.""" + rules = [] + sid = sid_start + for f in findings: + if f["type"] == "c2_uri" and f.get("indicator"): + uri = f["indicator"].replace('"', '\\"') + rules.append( + f'alert http $HOME_NET any -> 
$EXTERNAL_NET any ' + f'(msg:"MALWARE CobaltStrike Malleable C2 URI {uri}"; ' + f'flow:established,to_server; ' + f'http.uri; content:"{uri}"; ' + f'sid:{sid}; rev:1;)' + ) + sid += 1 + elif f["type"] == "named_pipe" and f.get("indicator"): + pipe = f["indicator"] + rules.append( + f'# Named pipe detection requires endpoint monitoring: {pipe}' + ) + return rules + + +def analyze(data): + if isinstance(data, str): + config = parse_profile_regex(data) + elif isinstance(data, dict): + config = data + else: + config = data[0] if isinstance(data, list) and data else {} + return analyze_profile(config) + + +def generate_report(input_path): + path = Path(input_path) + if path.suffix in (".profile", ".txt"): + content = path.read_text(encoding="utf-8") + config = parse_profile_regex(content) + findings = analyze_profile(config) + else: + data = load_data(input_path) + if isinstance(data, list): + findings = [] + for profile in data: + findings.extend(analyze_profile(profile)) + else: + findings = analyze_profile(data) + sev = Counter(f["severity"] for f in findings) + iocs = [f.get("indicator", "") for f in findings if f.get("indicator")] + rules = generate_suricata_rules(findings) + return { + "report": "cobaltstrike_malleable_c2_analysis", + "generated_at": datetime.utcnow().isoformat() + "Z", + "total_findings": len(findings), + "severity_summary": dict(sev), + "extracted_iocs": iocs, + "suricata_rules": rules, + "findings": findings, + } + + +def main(): + ap = argparse.ArgumentParser(description="CobaltStrike Malleable C2 Profile Analyzer") + ap.add_argument("--input", required=True, help="Input .profile file or JSON with parsed config") + ap.add_argument("--output", help="Output JSON report path") + args = ap.parse_args() + report = generate_report(args.input) + out = json.dumps(report, indent=2) + if args.output: + Path(args.output).write_text(out, encoding="utf-8") + print(f"Report written to {args.output}") + else: + print(out) + + +if __name__ == "__main__": + 
main() diff --git a/personas/_shared/skills/analyzing-command-and-control-communication/LICENSE b/personas/_shared/skills/analyzing-command-and-control-communication/LICENSE new file mode 100644 index 0000000..d885118 --- /dev/null +++ b/personas/_shared/skills/analyzing-command-and-control-communication/LICENSE @@ -0,0 +1,201 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to the Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by the Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding any notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. Please do not remove or change + the license header comment from a contributed file except when + necessary. 
+ + Copyright 2026 mukul975 + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/personas/_shared/skills/analyzing-command-and-control-communication/SKILL.md b/personas/_shared/skills/analyzing-command-and-control-communication/SKILL.md new file mode 100644 index 0000000..351cf7b --- /dev/null +++ b/personas/_shared/skills/analyzing-command-and-control-communication/SKILL.md @@ -0,0 +1,397 @@ +--- +name: analyzing-command-and-control-communication +description: 'Analyzes malware command-and-control (C2) communication protocols to understand beacon patterns, command structures, + data encoding, and infrastructure. Covers HTTP, HTTPS, DNS, and custom protocol C2 analysis for detection development and + threat intelligence. Activates for requests involving C2 analysis, beacon detection, C2 protocol reverse engineering, or + command-and-control infrastructure mapping. 
+ + ' +domain: cybersecurity +subdomain: malware-analysis +tags: +- malware +- C2 +- command-and-control +- beacon +- protocol-analysis +version: 1.0.0 +author: mahipal +license: Apache-2.0 +nist_csf: +- DE.AE-02 +- RS.AN-03 +- ID.RA-01 +- DE.CM-01 +--- + +# Analyzing Command-and-Control Communication + +## When to Use + +- Reverse engineering a malware sample has revealed network communication that needs protocol analysis +- Building network-level detection signatures for a specific C2 framework (Cobalt Strike, Metasploit, Sliver) +- Mapping C2 infrastructure including primary servers, fallback domains, and dead drops +- Analyzing encrypted or encoded C2 traffic to understand the command set and data format +- Attributing malware to a threat actor based on C2 infrastructure patterns and tooling + +**Do not use** for general network anomaly detection; this is specifically for understanding known or suspected C2 protocols from malware analysis. + +## Prerequisites + +- PCAP capture of malware network traffic (from sandbox, network tap, or full packet capture) +- Wireshark/tshark for packet-level analysis +- Reverse engineering tools (Ghidra, dnSpy) for understanding C2 code in the malware binary +- Python 3.8+ with `scapy`, `dpkt`, and `requests` for protocol analysis and replay +- Threat intelligence databases for C2 infrastructure correlation (VirusTotal, Shodan, Censys) +- JA3/JA3S fingerprint databases for TLS-based C2 identification + +## Workflow + +### Step 1: Identify the C2 Channel + +Determine the protocol and transport used for C2 communication: + +``` +C2 Communication Channels: +━━━━━━━━━━━━━━━━━━━━━━━━━ +HTTP/HTTPS: Most common; uses standard web traffic to blend in + Indicators: Regular POST/GET requests, specific URI patterns, custom headers + +DNS: Tunneling data through DNS queries and responses + Indicators: High-volume TXT queries, long subdomain names, high entropy + +Custom TCP/UDP: Proprietary binary protocol on non-standard port + Indicators: 
Non-HTTP traffic on high ports, unknown protocol + +ICMP: Data encoded in ICMP echo/reply payloads + Indicators: ICMP packets with large or non-standard payloads + +WebSocket: Persistent bidirectional connection for real-time C2 + Indicators: WebSocket upgrade followed by binary frames + +Cloud Services: Using legitimate APIs (Telegram, Discord, Slack, GitHub) + Indicators: API calls to cloud services from unexpected processes + +Email: SMTP/IMAP for C2 commands and data exfiltration + Indicators: Automated email operations from non-email processes +``` + +### Step 2: Analyze Beacon Pattern + +Characterize the periodic communication pattern: + +```python +from scapy.all import rdpcap, IP, TCP +from collections import defaultdict +import statistics +import json + +packets = rdpcap("c2_traffic.pcap") + +# Group TCP SYN packets by destination +connections = defaultdict(list) +for pkt in packets: + if IP in pkt and TCP in pkt and (pkt[TCP].flags & 0x02): + key = f"{pkt[IP].dst}:{pkt[TCP].dport}" + connections[key].append(float(pkt.time)) + +# Analyze each destination for beaconing +for dst, times in sorted(connections.items()): + if len(times) < 3: + continue + + intervals = [times[i+1] - times[i] for i in range(len(times)-1)] + avg_interval = statistics.mean(intervals) + stdev = statistics.stdev(intervals) if len(intervals) > 1 else 0 + jitter_pct = (stdev / avg_interval * 100) if avg_interval > 0 else 0 + duration = times[-1] - times[0] + + beacon_data = { + "destination": dst, + "connections": len(times), + "duration_seconds": round(duration, 1), + "avg_interval_seconds": round(avg_interval, 1), + "stdev_seconds": round(stdev, 1), + "jitter_percent": round(jitter_pct, 1), + "is_beacon": 5 < avg_interval < 7200 and jitter_pct < 25, + } + + if beacon_data["is_beacon"]: + print(f"[!] 
BEACON DETECTED: {dst}") + print(f" Interval: {avg_interval:.0f}s +/- {stdev:.0f}s ({jitter_pct:.0f}% jitter)") + print(f" Sessions: {len(times)} over {duration:.0f}s") +``` + +### Step 3: Decode C2 Protocol Structure + +Reverse engineer the message format from captured traffic: + +```python +# HTTP-based C2 protocol analysis +import dpkt +import base64 + +with open("c2_traffic.pcap", "rb") as f: + pcap = dpkt.pcap.Reader(f) + +for ts, buf in pcap: + eth = dpkt.ethernet.Ethernet(buf) + if not isinstance(eth.data, dpkt.ip.IP): + continue + ip = eth.data + if not isinstance(ip.data, dpkt.tcp.TCP): + continue + tcp = ip.data + + if tcp.dport == 80 or tcp.dport == 443: + if len(tcp.data) > 0: + try: + http = dpkt.http.Request(tcp.data) + print(f"\n--- C2 REQUEST ---") + print(f"Method: {http.method}") + print(f"URI: {http.uri}") + print(f"Headers: {dict(http.headers)}") + if http.body: + print(f"Body ({len(http.body)} bytes):") + # Try Base64 decode + try: + decoded = base64.b64decode(http.body) + print(f" Decoded: {decoded[:200]}") + except: + print(f" Raw: {http.body[:200]}") + except: + pass +``` + +### Step 4: Identify C2 Framework + +Match observed patterns to known C2 frameworks: + +``` +Known C2 Framework Signatures: +━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +Cobalt Strike: + - Default URIs: /pixel, /submit.php, /___utm.gif, /ca, /dpixel + - Malleable C2 profiles customize all traffic characteristics + - JA3: varies by profile, catalog at ja3er.com + - Watermark in beacon config (unique per license) + - Config extraction: use CobaltStrikeParser or 1768.py + +Metasploit/Meterpreter: + - Default staging URI patterns: random 4-char checksum + - Reverse HTTP(S) handler patterns + - Meterpreter TLV (Type-Length-Value) protocol structure + +Sliver: + - mTLS, HTTP, DNS, WireGuard transport options + - Protobuf-encoded messages + - Unique implant ID in communication + +Covenant: + - .NET-based C2 framework + - HTTP with customizable profiles + - Task-based command execution + 
+PoshC2: + - PowerShell/C# based + - HTTP with encrypted payloads + - Cookie-based session management +``` + +```bash +# Extract Cobalt Strike beacon configuration from PCAP or sample +python3 << 'PYEOF' +# Using CobaltStrikeParser (pip install cobalt-strike-parser) +from cobalt_strike_parser import BeaconConfig + +try: + config = BeaconConfig.from_file("suspect.exe") + print("Cobalt Strike Beacon Configuration:") + for key, value in config.items(): + print(f" {key}: {value}") +except Exception as e: + print(f"Not a Cobalt Strike beacon or parse error: {e}") +PYEOF +``` + +### Step 5: Map C2 Infrastructure + +Document the full C2 infrastructure and failover mechanisms: + +```python +# Infrastructure mapping +import requests +import json + +c2_indicators = { + "primary_c2": "185.220.101.42", + "domains": ["update.malicious.com", "backup.evil.net"], + "ports": [443, 8443], + "failover_dns": ["ns1.malicious-dns.com"], +} + +# Enrich with Shodan +def shodan_lookup(ip, api_key): + resp = requests.get(f"https://api.shodan.io/shodan/host/{ip}?key={api_key}") + if resp.status_code == 200: + data = resp.json() + return { + "ip": ip, + "ports": data.get("ports", []), + "os": data.get("os"), + "org": data.get("org"), + "asn": data.get("asn"), + "country": data.get("country_code"), + "hostnames": data.get("hostnames", []), + "last_update": data.get("last_update"), + } + return None + +# Enrich with passive DNS +def pdns_lookup(domain): + # Using VirusTotal passive DNS + resp = requests.get( + f"https://www.virustotal.com/api/v3/domains/{domain}/resolutions", + headers={"x-apikey": VT_API_KEY} + ) + if resp.status_code == 200: + data = resp.json() + resolutions = [] + for r in data.get("data", []): + resolutions.append({ + "ip": r["attributes"]["ip_address"], + "date": r["attributes"]["date"], + }) + return resolutions + return [] +``` + +### Step 6: Create Network Detection Signatures + +Build detection rules based on analyzed C2 characteristics: + +```bash +# Suricata rules 
for the analyzed C2 +cat << 'EOF' > c2_detection.rules +# HTTP beacon pattern +alert http $HOME_NET any -> $EXTERNAL_NET any ( + msg:"MALWARE MalwareX C2 HTTP Beacon"; + flow:established,to_server; + http.method; content:"POST"; + http.uri; content:"/gate.php"; startswith; + http.header; content:"User-Agent: Mozilla/5.0 (compatible; MSIE 10.0)"; + threshold:type threshold, track by_src, count 5, seconds 600; + sid:9000010; rev:1; +) + +# JA3 fingerprint match +alert tls $HOME_NET any -> $EXTERNAL_NET any ( + msg:"MALWARE MalwareX TLS JA3 Fingerprint"; + ja3.hash; content:"a0e9f5d64349fb13191bc781f81f42e1"; + sid:9000011; rev:1; +) + +# DNS beacon detection (high-entropy subdomain) +alert dns $HOME_NET any -> any any ( + msg:"MALWARE Suspected DNS C2 Tunneling"; + dns.query; pcre:"/^[a-z0-9]{20,}\./"; + threshold:type threshold, track by_src, count 10, seconds 60; + sid:9000012; rev:1; +) + +# Certificate-based detection +alert tls $HOME_NET any -> $EXTERNAL_NET any ( + msg:"MALWARE MalwareX Self-Signed C2 Certificate"; + tls.cert_subject; content:"CN=update.malicious.com"; + sid:9000013; rev:1; +) +EOF +``` + +## Key Concepts + +| Term | Definition | +|------|------------| +| **Beaconing** | Periodic check-in communication from malware to C2 server at regular intervals, often with jitter to avoid pattern detection | +| **Jitter** | Randomization applied to beacon interval (e.g., 60s +/- 15%) to make the timing pattern less predictable and harder to detect | +| **Malleable C2** | Cobalt Strike feature allowing operators to customize all aspects of C2 traffic (URIs, headers, encoding) to mimic legitimate services | +| **Dead Drop** | Intermediate location (paste site, cloud storage, social media) where C2 commands are posted for the malware to retrieve | +| **Domain Fronting** | Using a trusted CDN domain in the TLS SNI while routing to a different backend, making C2 traffic appear to go to a legitimate service | +| **Fast Flux** | Rapidly changing DNS records for C2 
domains to distribute across many IPs and resist takedown efforts | +| **C2 Framework** | Software toolkit providing C2 server, implant generator, and operator interface (Cobalt Strike, Metasploit, Sliver, Covenant) | + +## Tools & Systems + +- **Wireshark**: Packet analyzer for detailed C2 protocol analysis at the packet level +- **RITA (Real Intelligence Threat Analytics)**: Open-source tool analyzing Zeek logs for beacon detection and DNS tunneling +- **CobaltStrikeParser**: Tool extracting Cobalt Strike beacon configuration from samples and memory dumps +- **JA3/JA3S**: TLS fingerprinting method for identifying C2 frameworks by their TLS implementation characteristics +- **Shodan/Censys**: Internet scanning platforms for mapping C2 infrastructure and identifying related servers + +## Common Scenarios + +### Scenario: Reverse Engineering a Custom C2 Protocol + +**Context**: A malware sample communicates with its C2 server using an unknown binary protocol over TCP port 8443. The protocol needs to be decoded to understand the command set and build detection signatures. + +**Approach**: +1. Filter PCAP for TCP port 8443 conversations and extract the TCP streams +2. Analyze the first few exchanges to identify the handshake/authentication mechanism +3. Map the message structure (length prefix, type field, payload encoding) +4. Cross-reference with Ghidra disassembly of the send/receive functions in the malware +5. Identify the command dispatcher and document each command code's function +6. Build a protocol decoder in Python for ongoing traffic analysis +7. 
Create Suricata rules matching the protocol handshake or static header bytes + +**Pitfalls**: +- Assuming the protocol is static; some C2 frameworks negotiate encryption during the handshake +- Not capturing enough traffic to see all command types (some commands are rare) +- Missing fallback C2 channels (DNS, ICMP) that activate when the primary channel fails +- Confusing encrypted payload data with the protocol framing structure + +## Output Format + +``` +C2 COMMUNICATION ANALYSIS REPORT +=================================== +Sample: malware.exe (SHA-256: e3b0c44...) +C2 Framework: Cobalt Strike 4.9 + +BEACON CONFIGURATION +C2 Server: hxxps://185.220.101[.]42/updates +Beacon Type: HTTPS (reverse) +Sleep: 60 seconds +Jitter: 15% +User-Agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) +URI (GET): /dpixel +URI (POST): /submit.php +Watermark: 1234567890 + +PROTOCOL ANALYSIS +Transport: HTTPS (TLS 1.2) +JA3 Hash: a0e9f5d64349fb13191bc781f81f42e1 +Certificate: CN=Microsoft Update (self-signed) +Encoding: Base64 with XOR key 0x69 +Command Format: [4B length][4B command_id][payload] + +COMMAND SET +0x01 - Sleep Change beacon interval +0x02 - Shell Execute cmd.exe command +0x03 - Download Transfer file from C2 +0x04 - Upload Exfiltrate file to C2 +0x05 - Inject Process injection +0x06 - Keylog Start keylogger +0x07 - Screenshot Capture screen + +INFRASTRUCTURE +Primary: 185.220.101[.]42 (AS12345, Hosting Co, NL) +Failover: 91.215.85[.]17 (AS67890, VPS Provider, RU) +DNS: update.malicious[.]com -> 185.220.101[.]42 +Registrar: NameCheap +Registration: 2025-09-01 + +DETECTION SIGNATURES +SID 9000010: HTTP beacon pattern +SID 9000011: JA3 TLS fingerprint +SID 9000013: C2 certificate match +``` diff --git a/personas/_shared/skills/analyzing-command-and-control-communication/references/api-reference.md b/personas/_shared/skills/analyzing-command-and-control-communication/references/api-reference.md new file mode 100644 index 0000000..5aaa095 --- /dev/null +++ 
b/personas/_shared/skills/analyzing-command-and-control-communication/references/api-reference.md @@ -0,0 +1,112 @@ +# API Reference: C2 Communication Analysis Tools + +## Scapy - Packet Analysis Library (Python) + +### Reading PCAPs +```python +from scapy.all import rdpcap, IP, TCP, UDP, DNS, DNSQR +packets = rdpcap("capture.pcap") +``` + +### Filtering Packets +```python +# TCP SYN packets (connection initiation) +syn_pkts = [p for p in packets if TCP in p and (p[TCP].flags & 0x02)] + +# DNS queries +dns_pkts = [p for p in packets if DNS in p and p[DNS].qr == 0] + +# Access fields +pkt[IP].src # Source IP +pkt[IP].dst # Destination IP +pkt[TCP].sport # Source port +pkt[TCP].dport # Destination port +pkt[TCP].flags # TCP flags (0x02 = SYN) +float(pkt.time) # Packet timestamp +``` + +## dpkt - Packet Parsing Library (Python) + +### Reading PCAPs +```python +import dpkt +with open("capture.pcap", "rb") as f: + pcap = dpkt.pcap.Reader(f) + for timestamp, buf in pcap: + eth = dpkt.ethernet.Ethernet(buf) + ip = eth.data + tcp = ip.data +``` + +### HTTP Request Parsing +```python +http = dpkt.http.Request(tcp.data) +http.method # GET, POST +http.uri # /path +http.headers # dict of headers +http.body # POST body +``` + +## tshark - CLI Wireshark + +### Beacon Analysis +```bash +tshark -r capture.pcap -T fields -e ip.dst -e tcp.dstport -e frame.time_epoch \ + -Y "tcp.flags.syn==1" > syn_times.csv +``` + +### HTTP Extraction +```bash +tshark -r capture.pcap -Y "http.request" -T fields \ + -e http.request.method -e http.host -e http.request.uri -e http.user_agent +``` + +### DNS Extraction +```bash +tshark -r capture.pcap -Y "dns.qr==0" -T fields \ + -e dns.qry.name -e dns.qry.type -e ip.src +``` + +### JA3 TLS Fingerprinting +```bash +tshark -r capture.pcap -Y "tls.handshake.type==1" -T fields \ + -e ip.src -e tls.handshake.ja3 +``` + +## CobaltStrikeParser - Beacon Config Extraction + +### Usage +```python +from cobalt_strike_parser import BeaconConfig +config = 
BeaconConfig.from_file("beacon.bin") +for key, value in config.items(): + print(f"{key}: {value}") +``` + +### Key Config Fields +| Field | Description | +|-------|-------------| +| `BeaconType` | HTTP, HTTPS, DNS, SMB | +| `C2Server` | Primary C2 URL | +| `SleepTime` | Beacon interval (ms) | +| `Jitter` | Jitter percentage | +| `UserAgent` | HTTP User-Agent string | +| `Watermark` | License watermark ID | + +## Suricata - Network IDS Rules + +### Rule Syntax +``` +alert -> (msg:""; ; sid:N; rev:N;) +``` + +### Key Keywords +| Keyword | Purpose | +|---------|---------| +| `http.method` | Match HTTP method | +| `http.uri` | Match request URI | +| `http.header` | Match header content | +| `ja3.hash` | Match JA3 TLS fingerprint | +| `dns.query` | Match DNS query name | +| `tls.cert_subject` | Match TLS certificate CN | +| `threshold` | Rate-based detection | diff --git a/personas/_shared/skills/analyzing-command-and-control-communication/scripts/agent.py b/personas/_shared/skills/analyzing-command-and-control-communication/scripts/agent.py new file mode 100644 index 0000000..dcd09f5 --- /dev/null +++ b/personas/_shared/skills/analyzing-command-and-control-communication/scripts/agent.py @@ -0,0 +1,214 @@ +#!/usr/bin/env python3 +"""C2 communication analysis agent for beacon detection and protocol decoding.""" + +import statistics +import base64 +import os +import sys +from collections import defaultdict + +try: + from scapy.all import rdpcap, IP, TCP, DNS, DNSQR + HAS_SCAPY = True +except ImportError: + HAS_SCAPY = False + +try: + import dpkt + HAS_DPKT = True +except ImportError: + HAS_DPKT = False + + +def detect_beacons(pcap_path, min_connections=5, max_jitter_pct=25.0): + """Analyze PCAP for periodic beacon patterns using TCP SYN timing.""" + if not HAS_SCAPY: + print("[ERROR] scapy not installed: pip install scapy") + return [] + packets = rdpcap(pcap_path) + connections = defaultdict(list) + for pkt in packets: + if IP in pkt and TCP in pkt and (pkt[TCP].flags & 
0x02): + key = f"{pkt[IP].dst}:{pkt[TCP].dport}" + connections[key].append(float(pkt.time)) + beacons = [] + for dst, times in sorted(connections.items()): + if len(times) < min_connections: + continue + intervals = [times[i + 1] - times[i] for i in range(len(times) - 1)] + avg_interval = statistics.mean(intervals) + stdev = statistics.stdev(intervals) if len(intervals) > 1 else 0 + jitter_pct = (stdev / avg_interval * 100) if avg_interval > 0 else 0 + is_beacon = 5 < avg_interval < 7200 and jitter_pct < max_jitter_pct + record = { + "destination": dst, + "connections": len(times), + "duration_seconds": round(times[-1] - times[0], 1), + "avg_interval_seconds": round(avg_interval, 1), + "stdev_seconds": round(stdev, 1), + "jitter_percent": round(jitter_pct, 1), + "is_beacon": is_beacon, + } + if is_beacon: + beacons.append(record) + return beacons + + +def extract_http_requests(pcap_path): + """Extract HTTP requests from a PCAP file using dpkt.""" + if not HAS_DPKT: + print("[ERROR] dpkt not installed: pip install dpkt") + return [] + requests = [] + with open(pcap_path, "rb") as f: + pcap = dpkt.pcap.Reader(f) + for ts, buf in pcap: + try: + eth = dpkt.ethernet.Ethernet(buf) + if not isinstance(eth.data, dpkt.ip.IP): + continue + ip = eth.data + if not isinstance(ip.data, dpkt.tcp.TCP): + continue + tcp = ip.data + if len(tcp.data) == 0: + continue + try: + http = dpkt.http.Request(tcp.data) + decoded_body = None + if http.body: + try: + decoded_body = base64.b64decode(http.body).decode("utf-8", errors="replace") + except Exception: + decoded_body = http.body[:200] + requests.append({ + "timestamp": ts, + "src_ip": ".".join(str(b) for b in ip.src), + "dst_ip": ".".join(str(b) for b in ip.dst), + "dst_port": tcp.dport, + "method": http.method, + "uri": http.uri, + "host": http.headers.get("host", ""), + "user_agent": http.headers.get("user-agent", ""), + "body_size": len(http.body) if http.body else 0, + "decoded_body_preview": decoded_body, + }) + except 
(dpkt.dpkt.NeedData, dpkt.dpkt.UnpackError): + pass + except Exception: + continue + return requests + + +def extract_dns_queries(pcap_path): + """Extract DNS queries from a PCAP for C2 domain identification.""" + if not HAS_SCAPY: + return [] + packets = rdpcap(pcap_path) + queries = [] + for pkt in packets: + if DNS in pkt and pkt[DNS].qr == 0 and DNSQR in pkt: + qname = pkt[DNSQR].qname.decode("utf-8", errors="replace").rstrip(".") + queries.append({ + "src_ip": pkt[IP].src if IP in pkt else "?", + "query": qname, + "type": pkt[DNSQR].qtype, + }) + return queries + + +def identify_c2_framework(http_requests): + """Match HTTP request patterns against known C2 framework signatures.""" + cs_uris = ["/pixel", "/submit.php", "/__utm.gif", "/ca", "/dpixel", + "/push", "/visit.js", "/tab_icon"] + framework_hits = [] + for req in http_requests: + uri = req.get("uri", "") + ua = req.get("user_agent", "") + for cs_uri in cs_uris: + if cs_uri in uri: + framework_hits.append({ + "framework": "Cobalt Strike", + "indicator": f"URI pattern: {cs_uri}", + "request": req, + }) + break + if "MeterSSL" in ua or len(uri) == 5 and uri.startswith("/"): + framework_hits.append({ + "framework": "Metasploit/Meterpreter", + "indicator": f"URI/UA pattern: {uri} / {ua[:50]}", + "request": req, + }) + return framework_hits + + +def generate_suricata_rules(beacons, http_requests): + """Generate Suricata IDS rules from observed C2 patterns.""" + rules = [] + sid = 9000100 + for beacon in beacons: + dst_ip, dst_port = beacon["destination"].rsplit(":", 1) + rules.append( + f'alert tcp $HOME_NET any -> {dst_ip} {dst_port} (' + f'msg:"MALWARE Detected C2 Beacon to {dst_ip}:{dst_port}"; ' + f'flow:established,to_server; ' + f'threshold:type threshold, track by_src, count 5, seconds 600; ' + f'sid:{sid}; rev:1;)' + ) + sid += 1 + for req in http_requests[:5]: + if req.get("uri"): + uri = req["uri"] + rules.append( + f'alert http $HOME_NET any -> $EXTERNAL_NET any (' + f'msg:"MALWARE Suspected C2 
HTTP Request {uri}"; ' + f'flow:established,to_server; ' + f'http.method; content:"{req["method"]}"; ' + f'http.uri; content:"{uri}"; ' + f'sid:{sid}; rev:1;)' + ) + sid += 1 + return rules + + +if __name__ == "__main__": + print("=" * 60) + print("C2 Communication Analysis Agent") + print("Beacon detection, protocol decoding, signature generation") + print("=" * 60) + + pcap_file = sys.argv[1] if len(sys.argv) > 1 else None + + if pcap_file and os.path.exists(pcap_file): + print(f"\n[*] Analyzing PCAP: {pcap_file}") + + print("\n--- Beacon Detection ---") + beacons = detect_beacons(pcap_file) + for b in beacons: + print(f"[!] BEACON: {b['destination']} " + f"interval={b['avg_interval_seconds']}s " + f"jitter={b['jitter_percent']}% " + f"sessions={b['connections']}") + + print("\n--- HTTP Requests ---") + http_reqs = extract_http_requests(pcap_file) + for r in http_reqs[:10]: + print(f" {r['method']} {r['host']}{r['uri']}") + + print("\n--- DNS Queries ---") + dns_qs = extract_dns_queries(pcap_file) + for q in dns_qs[:10]: + print(f" {q['src_ip']} -> {q['query']}") + + print("\n--- C2 Framework Identification ---") + hits = identify_c2_framework(http_reqs) + for h in hits: + print(f"[!] {h['framework']}: {h['indicator']}") + + print("\n--- Suricata Rules ---") + rules = generate_suricata_rules(beacons, http_reqs) + for r in rules: + print(r) + else: + print("\n[DEMO] Usage: python agent.py ") + print("[*] Provide a PCAP file to analyze for C2 communication patterns.") diff --git a/personas/_shared/skills/analyzing-cyber-kill-chain/LICENSE b/personas/_shared/skills/analyzing-cyber-kill-chain/LICENSE new file mode 100644 index 0000000..d885118 --- /dev/null +++ b/personas/_shared/skills/analyzing-cyber-kill-chain/LICENSE @@ -0,0 +1,201 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to the Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by the Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding any notices that do not + pertain to any part of the Derivative Works, in at least one + of the 
following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. Please do not remove or change + the license header comment from a contributed file except when + necessary. + + Copyright 2026 mukul975 + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/personas/_shared/skills/analyzing-cyber-kill-chain/SKILL.md b/personas/_shared/skills/analyzing-cyber-kill-chain/SKILL.md new file mode 100644 index 0000000..a399df0 --- /dev/null +++ b/personas/_shared/skills/analyzing-cyber-kill-chain/SKILL.md @@ -0,0 +1,140 @@ +--- +name: analyzing-cyber-kill-chain +description: 'Analyzes intrusion activity against the Lockheed Martin Cyber Kill Chain framework to identify which phases + an adversary has completed, where defenses succeeded or failed, and what controls would have interrupted the attack at earlier + phases. Use when conducting post-incident analysis, building prevention-focused security controls, or mapping detection + gaps to kill chain phases. Activates for requests involving kill chain analysis, intrusion kill chain, attack phase mapping, + or Lockheed Martin kill chain framework. 
+ + ' +domain: cybersecurity +subdomain: threat-intelligence +tags: +- kill-chain +- Lockheed-Martin +- MITRE-ATT&CK +- intrusion-analysis +- defense-in-depth +- NIST-CSF +version: 1.0.0 +author: team-cybersecurity +license: Apache-2.0 +nist_csf: +- ID.RA-01 +- ID.RA-05 +- DE.CM-01 +- DE.AE-02 +--- +# Analyzing Cyber Kill Chain + +## When to Use + +Use this skill when: +- Conducting post-incident analysis to determine how far an adversary progressed through an attack sequence +- Designing layered defensive controls with the goal of interrupting attacks at the earliest possible phase +- Producing threat intelligence reports that communicate attack progression to non-technical stakeholders + +**Do not use** this skill as a standalone framework — combine with MITRE ATT&CK for technique-level granularity beyond what the 7-phase kill chain provides. + +## Prerequisites + +- Complete incident timeline with forensic artifacts mapped to specific adversary actions +- MITRE ATT&CK Enterprise matrix for technique-level mapping within each kill chain phase +- Access to threat intelligence on the suspected adversary group's typical kill chain progression +- Post-incident report or IR timeline from responding team + +## Workflow + +### Step 1: Map Observed Actions to Kill Chain Phases + +The Lockheed Martin Cyber Kill Chain consists of seven phases. Map all observed adversary actions: + +**Phase 1 - Reconnaissance**: Adversary gathers target information before attack. +- Indicators: DNS queries from adversary IP, LinkedIn scraping, job posting analysis, Shodan scans of organization infrastructure + +**Phase 2 - Weaponization**: Adversary creates attack tool (malware + exploit). +- Indicators: Malware compilation timestamps, exploit document metadata, builder artifacts in malware samples + +**Phase 3 - Delivery**: Adversary transmits weapon to target. 
+- Indicators: Phishing emails, malicious attachments, drive-by downloads, USB drops, supply chain compromise + +**Phase 4 - Exploitation**: Adversary exploits vulnerability to execute code. +- Indicators: CVE exploitation events in application/OS logs, memory corruption artifacts, shellcode execution + +**Phase 5 - Installation**: Adversary establishes persistence on target. +- Indicators: New scheduled tasks, registry run keys, service installation, web shells, bootkits + +**Phase 6 - Command & Control (C2)**: Adversary communicates with compromised system. +- Indicators: Beaconing traffic (regular intervals), DNS tunneling, HTTPS to uncommon domains, C2 framework signatures (Cobalt Strike, Sliver) + +**Phase 7 - Actions on Objectives**: Adversary achieves goals. +- Indicators: Data staging/exfiltration, lateral movement, ransomware execution, destructive activity + +### Step 2: Identify Phase Completion and Detection Points + +Create a phase matrix for the incident: +``` +Phase 1: Recon → Completed (undetected) +Phase 2: Weaponize → Completed (undetected — pre-attack) +Phase 3: Delivery → Completed; phishing email bypassed SEG +Phase 4: Exploit → Completed; CVE-2023-23397 exploited +Phase 5: Install → DETECTED: EDR flagged scheduled task creation (attack stalled here) +Phase 6: C2 → Not achieved (installation blocked) +Phase 7: Objectives → Not achieved +``` + +For each phase completed without detection, document the defensive control gap. + +### Step 3: Map to MITRE ATT&CK for Technique Detail + +Each kill chain phase maps to multiple ATT&CK tactics: +- Delivery → Initial Access (TA0001) +- Exploitation → Execution (TA0002) +- Installation → Persistence (TA0003), Privilege Escalation (TA0004) +- C2 → Command and Control (TA0011) +- Actions on Objectives → Exfiltration (TA0010), Impact (TA0040) + +Within each phase, enumerate specific ATT&CK techniques observed and map to existing detections. 
+ +### Step 4: Identify Courses of Action per Phase + +For each phase, document applicable defensive courses of action (COAs): +- **Detect COA**: What detection would alert on adversary activity in this phase? +- **Deny COA**: What control would prevent the adversary from completing this phase? +- **Disrupt COA**: What control would interrupt the adversary mid-phase? +- **Degrade COA**: What control would reduce the adversary's effectiveness in this phase? +- **Deceive COA**: What deception (honeypots, canary tokens) would expose activity in this phase? +- **Destroy COA**: What active defense capability would neutralize adversary infrastructure? + +### Step 5: Produce Kill Chain Analysis Report + +Structure findings as: +1. Attack narrative (timeline of phases) +2. Phase-by-phase analysis with evidence +3. Detection point analysis (what worked, what failed) +4. Defensive recommendation per phase prioritized by cost/effectiveness +5. Control improvement roadmap + +## Key Concepts + +| Term | Definition | +|------|-----------| +| **Kill Chain** | Sequential model of adversary intrusion phases; breaking any link theoretically stops the attack | +| **Courses of Action (COA)** | Defensive responses mapped to each kill chain phase: detect, deny, disrupt, degrade, deceive, destroy | +| **Beaconing** | Regular, periodic C2 check-in pattern from compromised host to adversary server; detectable by frequency analysis | +| **Phase Completion** | Adversary successfully finishes a kill chain phase and progresses to the next; defense-in-depth aims to prevent this | +| **Intelligence Gain/Loss** | Analysis of whether detecting at Phase 5 (vs. 
Phase 3) reduced intelligence about adversary capabilities or intent | + +## Tools & Systems + +- **MITRE ATT&CK Navigator**: Overlay kill chain phases with ATT&CK technique coverage for integrated analysis +- **Elastic Security EQL**: Event Query Language for querying multi-phase attack sequences in Elastic SIEM +- **Splunk ES**: Timeline visualization and correlation searches for kill chain phase sequencing +- **MISP**: Kill chain tagging via galaxy clusters for structured incident event documentation + +## Common Pitfalls + +- **Linear assumption**: Adversaries don't always progress linearly — they may skip phases (weaponization already complete from previous campaign) or loop back (re-establish C2 after detection). +- **Ignoring Phases 1 and 2**: Reconnaissance and weaponization occur before the defender has visibility. Intelligence about these phases requires external sources (OSINT, threat intelligence). +- **Missing insider threats**: The kill chain was designed for external adversaries. Insider threats may skip directly to Phase 7 without traversing earlier phases. +- **Confusing with ATT&CK tactics**: The 7-phase kill chain and 14 ATT&CK tactics are complementary but not directly equivalent. Maintain distinction to prevent analytic confusion. 
diff --git a/personas/_shared/skills/analyzing-cyber-kill-chain/references/api-reference.md b/personas/_shared/skills/analyzing-cyber-kill-chain/references/api-reference.md new file mode 100644 index 0000000..9b2e436 --- /dev/null +++ b/personas/_shared/skills/analyzing-cyber-kill-chain/references/api-reference.md @@ -0,0 +1,96 @@ +# API Reference: Cyber Kill Chain Analysis Tools + +## Lockheed Martin Cyber Kill Chain Phases + +| Phase | Name | MITRE ATT&CK Tactic | +|-------|------|---------------------| +| 1 | Reconnaissance | TA0043 Reconnaissance | +| 2 | Weaponization | TA0042 Resource Development | +| 3 | Delivery | TA0001 Initial Access | +| 4 | Exploitation | TA0002 Execution | +| 5 | Installation | TA0003 Persistence, TA0004 Privilege Escalation | +| 6 | Command & Control | TA0011 Command and Control | +| 7 | Actions on Objectives | TA0010 Exfiltration, TA0040 Impact | + +## Courses of Action (COA) Matrix + +| COA | Description | +|-----|-------------| +| Detect | Alert on adversary activity | +| Deny | Prevent phase completion | +| Disrupt | Interrupt adversary mid-phase | +| Degrade | Reduce adversary effectiveness | +| Deceive | Expose activity via deception | +| Destroy | Neutralize adversary infrastructure | + +## MITRE ATT&CK Navigator + +### JSON Layer Format +```json +{ + "name": "Kill Chain Coverage", + "versions": {"navigator": "4.8", "layer": "4.4", "attack": "13"}, + "domain": "enterprise-attack", + "techniques": [ + {"techniqueID": "T1566", "color": "#ff6666", "comment": "Phase 3: Delivery"} + ] +} +``` + +### CLI Usage +```bash +# Export layer via ATT&CK Navigator API +curl -X POST https://mitre-attack.github.io/attack-navigator/api/layers \ + -d @layer.json -o coverage_map.svg +``` + +## Splunk - Kill Chain Phase Queries + +### Phase 3 Detection (Delivery) +```spl +index=email sourcetype=exchange action=delivered +| eval has_macro=if(match(attachment, "\.(docm|xlsm|pptm)$"), 1, 0) +| where has_macro=1 +| stats count by sender, subject, 
#!/usr/bin/env python3
"""Cyber Kill Chain analysis agent for mapping incidents to Lockheed Martin kill chain phases."""

import datetime


# Reference data for the seven Lockheed Martin kill chain phases.
# Each entry carries: a short description, typical forensic indicators,
# the corresponding MITRE ATT&CK tactic IDs, and defensive courses of
# action (COAs) keyed by COA type (detect/deny/disrupt/degrade/deceive).
KILL_CHAIN_PHASES = {
    1: {
        "name": "Reconnaissance",
        "description": "Adversary gathers target information",
        "indicators": [
            "DNS queries from adversary IP",
            "LinkedIn/social media scraping",
            "Shodan/Censys scans of infrastructure",
            "Job posting analysis for technology stack",
            "WHOIS lookups on organization domains",
        ],
        "mitre_tactics": ["TA0043 - Reconnaissance"],
        "coas": {
            "detect": "Monitor for anomalous DNS lookups and port scans from single sources",
            "deny": "Limit public-facing information, restrict DNS zone transfers",
            "disrupt": "Block scanning IPs at perimeter firewall",
            "degrade": "Return honeypot responses to recon probes",
            "deceive": "Deploy decoy infrastructure and fake employee profiles",
        },
    },
    2: {
        "name": "Weaponization",
        "description": "Adversary creates attack tool (malware + exploit)",
        "indicators": [
            "Malware compilation timestamps",
            "Exploit document metadata",
            "Builder tool artifacts in samples",
            "Reused infrastructure from previous campaigns",
        ],
        "mitre_tactics": ["TA0042 - Resource Development"],
        "coas": {
            "detect": "Threat intelligence on adversary tooling and TTPs",
            "deny": "Patch vulnerabilities targeted by known exploit kits",
            "disrupt": "N/A (occurs outside defender visibility)",
            "degrade": "Application hardening reduces exploit reliability",
            "deceive": "Share deceptive vulnerability information",
        },
    },
    3: {
        "name": "Delivery",
        "description": "Adversary transmits weapon to target",
        "indicators": [
            "Phishing emails with malicious attachments",
            "Drive-by download URLs",
            "USB device insertion events",
            "Supply chain compromise artifacts",
            "Watering hole website modifications",
        ],
        "mitre_tactics": ["TA0001 - Initial Access"],
        "coas": {
            "detect": "Email security gateway alerts, proxy URL filtering alerts",
            "deny": "Block malicious attachments, URL filtering, USB device control",
            "disrupt": "Quarantine suspicious emails before delivery",
            "degrade": "Sandbox detonation of attachments delays delivery",
            "deceive": "Canary documents in email attachments",
        },
    },
    4: {
        "name": "Exploitation",
        "description": "Adversary exploits vulnerability to execute code",
        "indicators": [
            "CVE exploitation in application logs",
            "Memory corruption crash dumps",
            "Shellcode execution artifacts",
            "Exploit kit landing page access",
        ],
        "mitre_tactics": ["TA0002 - Execution"],
        "coas": {
            "detect": "EDR behavioral detection, exploit guard alerts",
            "deny": "Patch management, application whitelisting",
            "disrupt": "ASLR, DEP, CFG memory protections",
            "degrade": "Sandboxed application execution (Protected View)",
            "deceive": "Honeypot applications with fake vulnerabilities",
        },
    },
    5: {
        "name": "Installation",
        "description": "Adversary establishes persistence on target",
        "indicators": [
            "New scheduled tasks or services",
            "Registry Run key modifications",
            "Web shell deployment",
            "Startup folder additions",
            "DLL search-order hijacking",
        ],
        "mitre_tactics": ["TA0003 - Persistence", "TA0004 - Privilege Escalation"],
        "coas": {
            "detect": "Sysmon EventID 11/12/13, EDR persistence monitoring",
            "deny": "Application whitelisting, UAC enforcement",
            "disrupt": "Real-time file integrity monitoring alerts",
            "degrade": "Restrict write access to system directories",
            "deceive": "Canary registry keys and file system canaries",
        },
    },
    6: {
        "name": "Command & Control",
        "description": "Adversary communicates with compromised system",
        "indicators": [
            "Beaconing traffic at regular intervals",
            "DNS tunneling (high entropy subdomain queries)",
            "HTTPS to newly registered domains",
            "Known C2 framework signatures",
        ],
        "mitre_tactics": ["TA0011 - Command and Control"],
        "coas": {
            "detect": "Network beacon analysis, JA3 fingerprinting, DNS monitoring",
            "deny": "DNS sinkholing, firewall egress filtering",
            "disrupt": "TLS inspection to identify C2 in encrypted traffic",
            "degrade": "Rate-limit suspicious outbound connections",
            "deceive": "C2 interception and response manipulation",
        },
    },
    7: {
        "name": "Actions on Objectives",
        "description": "Adversary achieves mission goals",
        "indicators": [
            "Data staging and exfiltration",
            "Lateral movement to additional systems",
            "Ransomware encryption activity",
            "Destructive operations (wiper malware)",
            "Credential dumping (LSASS access)",
        ],
        "mitre_tactics": ["TA0010 - Exfiltration", "TA0040 - Impact"],
        "coas": {
            "detect": "DLP alerts, anomalous data transfers, UEBA",
            "deny": "Network segmentation, data classification controls",
            "disrupt": "Isolate compromised systems, kill C2 connections",
            "degrade": "Encrypt sensitive data at rest (attacker gets ciphertext)",
            "deceive": "Canary files and honeytoken credentials",
        },
    },
}


def map_event_to_phase(event_description):
    """Map an incident event description to the most likely kill chain phase.

    Scoring is keyword-based: each phase has a keyword list, and each keyword
    found (case-insensitive substring match) adds one point to that phase.

    Args:
        event_description: Free-text description of an observed event.

    Returns:
        Phase number 1-7 with the highest keyword score, or None when no
        keyword matched at all. Ties resolve to the lowest-numbered phase
        (earliest in the kill chain).
    """
    event_lower = event_description.lower()
    keyword_phase_map = {
        1: ["recon", "scan", "enumerat", "shodan", "whois", "dns lookup"],
        2: ["weaponiz", "builder", "compile", "payload creat"],
        3: ["phish", "email", "deliver", "download", "usb", "attachment", "watering hole"],
        4: ["exploit", "cve-", "buffer overflow", "shellcode", "rce"],
        5: ["persist", "scheduled task", "registry", "run key", "service install",
            "web shell", "backdoor", "startup"],
        6: ["beacon", "c2", "c&c", "command and control", "callback", "dns tunnel"],
        7: ["exfiltrat", "lateral", "ransomware", "encrypt", "data stag", "credential dump",
            "mimikatz", "wiper"],
    }
    scores = {phase: 0 for phase in range(1, 8)}
    for phase, keywords in keyword_phase_map.items():
        for kw in keywords:
            if kw in event_lower:
                scores[phase] += 1
    best_phase = max(scores, key=scores.get)
    if scores[best_phase] == 0:
        return None
    return best_phase


def analyze_incident(events):
    """Analyze a list of incident events and map to kill chain phases.

    Args:
        events: Iterable of dicts with optional keys "description" (str)
            and "detected" (bool). Events with no matching keywords are
            silently skipped.

    Returns:
        Dict keyed by phase number 1-7; each value is
        {"events": [...], "detected": bool, "completed": bool}.
        A phase is "completed" when at least one event mapped to it, and
        "detected" when any of those events carried detected=True.
    """
    analysis = {phase: {"events": [], "detected": False, "completed": False}
                for phase in range(1, 8)}
    for event in events:
        phase = map_event_to_phase(event.get("description", ""))
        if phase:
            analysis[phase]["events"].append(event)
            analysis[phase]["completed"] = True
            if event.get("detected", False):
                analysis[phase]["detected"] = True
    return analysis


def generate_report(analysis):
    """Generate a plain-text kill chain analysis report.

    Args:
        analysis: Phase-keyed dict as returned by analyze_incident().

    Returns:
        Multi-line report string: per-phase status with event evidence,
        deepest phase reached, first detection point, and recommended
        courses of action for every phase the adversary reached.
    """
    # datetime.utcnow() is deprecated (Python 3.12+) and returns a naive
    # datetime; use an aware UTC timestamp and keep the trailing "Z" form.
    generated = (
        datetime.datetime.now(datetime.timezone.utc)
        .isoformat()
        .replace("+00:00", "Z")
    )
    report_lines = [
        "CYBER KILL CHAIN ANALYSIS REPORT",
        "=" * 50,
        f"Generated: {generated}",
        "",
    ]
    deepest_phase = 0
    detection_phase = None
    for phase_num in range(1, 8):
        phase_data = analysis[phase_num]
        phase_info = KILL_CHAIN_PHASES[phase_num]
        if phase_data["completed"]:
            deepest_phase = phase_num
            # First detection is the lowest-numbered completed phase that
            # carried a detected event.
            if phase_data["detected"] and detection_phase is None:
                detection_phase = phase_num
        status = "COMPLETED" if phase_data["completed"] else "NOT REACHED"
        if phase_data["detected"]:
            status += " (DETECTED)"
        report_lines.append(f"Phase {phase_num}: {phase_info['name']} -> {status}")
        for evt in phase_data["events"]:
            report_lines.append(f"  - {evt.get('description', 'N/A')}")
    report_lines.extend([
        "",
        f"Deepest phase reached: {deepest_phase} ({KILL_CHAIN_PHASES.get(deepest_phase, {}).get('name', 'N/A')})",
        f"First detection at phase: {detection_phase or 'None'}",
        "",
        "RECOMMENDED COURSES OF ACTION:",
    ])
    # Recommend COAs only for phases the adversary actually reached.
    for phase_num in range(1, deepest_phase + 1):
        phase_info = KILL_CHAIN_PHASES[phase_num]
        report_lines.append(f"\n  Phase {phase_num} - {phase_info['name']}:")
        for coa_type, coa_desc in phase_info["coas"].items():
            report_lines.append(f"    {coa_type.upper()}: {coa_desc}")
    return "\n".join(report_lines)


if __name__ == "__main__":
    print("=" * 60)
    print("Cyber Kill Chain Analysis Agent")
    print("Lockheed Martin framework mapping with MITRE ATT&CK integration")
    print("=" * 60)

    # Demo incident events
    demo_events = [
        {"description": "Shodan scans detected from 203.0.113.50 targeting web servers",
         "timestamp": "2025-09-10T08:00:00Z", "detected": False},
        {"description": "Phishing email with malicious .docm attachment delivered to 5 users",
         "timestamp": "2025-09-11T09:15:00Z", "detected": False},
        {"description": "CVE-2023-23397 exploitation detected in Outlook process crash",
         "timestamp": "2025-09-11T09:20:00Z", "detected": False},
        {"description": "Scheduled task created for persistence by malware dropper",
         "timestamp": "2025-09-11T09:25:00Z", "detected": True},
        {"description": "C2 beacon detected to 185.220.101.42 on port 443",
         "timestamp": "2025-09-11T09:30:00Z", "detected": True},
    ]

    print("\n[*] Analyzing demo incident events...")
    analysis = analyze_incident(demo_events)
    report = generate_report(analysis)
    print(f"\n{report}")
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to the Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by the Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding any notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. Please do not remove or change + the license header comment from a contributed file except when + necessary. 
+ + Copyright 2026 mukul975 + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/personas/_shared/skills/analyzing-disk-image-with-autopsy/SKILL.md b/personas/_shared/skills/analyzing-disk-image-with-autopsy/SKILL.md new file mode 100644 index 0000000..56b6641 --- /dev/null +++ b/personas/_shared/skills/analyzing-disk-image-with-autopsy/SKILL.md @@ -0,0 +1,264 @@ +--- +name: analyzing-disk-image-with-autopsy +description: Perform comprehensive forensic analysis of disk images using Autopsy to recover files, examine artifacts, and + build investigation timelines. 
+domain: cybersecurity +subdomain: digital-forensics +tags: +- forensics +- autopsy +- disk-analysis +- sleuth-kit +- file-recovery +- artifact-analysis +version: '1.0' +author: mahipal +license: Apache-2.0 +nist_csf: +- RS.AN-01 +- RS.AN-03 +- DE.AE-02 +- RS.MA-01 +--- + +# Analyzing Disk Image with Autopsy + +## When to Use +- When you have a forensic disk image and need structured analysis of its contents +- During investigations requiring file recovery, keyword searching, and timeline analysis +- When non-technical stakeholders need visual reports from forensic evidence +- For examining file system metadata, deleted files, and embedded artifacts +- When building a comprehensive case from multiple disk images + +## Prerequisites +- Autopsy 4.x installed (Windows) or Autopsy 4.x with The Sleuth Kit (Linux) +- Forensic disk image in raw (dd), E01 (EnCase), or AFF format +- Minimum 8GB RAM (16GB recommended for large images) +- Java Runtime Environment (JRE) 8+ for Autopsy +- Sufficient disk space for the Autopsy case database (2-3x image size) +- Hash databases (NSRL, known-bad hashes) for file identification + +## Workflow + +### Step 1: Install Autopsy and Configure Environment + +```bash +# On Linux, install Sleuth Kit and Autopsy +sudo apt-get install autopsy sleuthkit + +# Download Autopsy 4.x (GUI version) from official source +wget https://github.com/sleuthkit/autopsy/releases/download/autopsy-4.21.0/autopsy-4.21.0.zip +unzip autopsy-4.21.0.zip -d /opt/autopsy + +# On Windows, run the MSI installer from sleuthkit.org +# Launch Autopsy +/opt/autopsy/bin/autopsy --nosplash + +# For Sleuth Kit command-line analysis alongside Autopsy +sudo apt-get install sleuthkit +``` + +### Step 2: Create a New Case and Add the Disk Image + +``` +1. Launch Autopsy > "New Case" +2. Enter Case Name: "CASE-2024-001-Workstation" +3. Set Base Directory: /cases/case-2024-001/autopsy/ +4. Enter Case Number, Examiner Name +5. Click "Add Data Source" +6. 
Select "Disk Image or VM File" +7. Browse to: /cases/case-2024-001/images/evidence.dd +8. Select Time Zone of the original system +9. Configure Ingest Modules (see Step 3) +``` + +```bash +# Alternatively, use Sleuth Kit CLI to verify the image first +img_stat /cases/case-2024-001/images/evidence.dd + +# List partitions in the image +mmls /cases/case-2024-001/images/evidence.dd + +# Output example: +# DOS Partition Table +# Offset Sector: 0 +# Units are in 512-byte sectors +# Slot Start End Length Description +# 00: ----- 0000000000 0000002047 0000002048 Primary Table (#0) +# 01: 00:00 0000002048 0001026047 0001024000 NTFS (0x07) +# 02: 00:01 0001026048 0976771071 0975745024 NTFS (0x07) + +# List files in a partition (offset 2048 sectors) +fls -o 2048 /cases/case-2024-001/images/evidence.dd +``` + +### Step 3: Configure and Run Ingest Modules + +``` +Enable the following Autopsy Ingest Modules: +- Recent Activity: Extracts browser history, downloads, cookies, bookmarks +- Hash Lookup: Compares files against NSRL and known-bad hash sets +- File Type Identification: Identifies files by signature, not extension +- Keyword Search: Indexes content for full-text searching +- Email Parser: Extracts emails from PST, MBOX, EML files +- Extension Mismatch Detector: Finds files with wrong extensions +- Exif Parser: Extracts metadata from images (GPS, camera, timestamps) +- Encryption Detection: Identifies encrypted files and containers +- Interesting Files Identifier: Flags files matching custom rule sets +- Embedded File Extractor: Extracts files from ZIP, Office docs, PDFs +- Picture Analyzer: Categorizes images using PhotoDNA or hash matching +- Data Source Integrity: Verifies image hash during ingest +``` + +```bash +# Configure NSRL hash set for known-good filtering +# Download NSRL from https://www.nist.gov/itl/ssd/software-quality-group/national-software-reference-library-nsrl +wget https://s3.amazonaws.com/rds.nsrl.nist.gov/RDS/current/rds_modernm.zip +unzip 
rds_modernm.zip -d /opt/autopsy/hashsets/ + +# Import into Autopsy: +# Tools > Options > Hash Sets > Import > Select NSRLFile.txt +# Mark as "Known" (to filter out known-good files) +``` + +### Step 4: Analyze File System and Recover Deleted Files + +```bash +# In Autopsy GUI: Navigate tree structure +# - Data Sources > evidence.dd > vol2 (NTFS) +# - Examine directory tree, note deleted files (marked with X) + +# Using Sleuth Kit CLI for targeted recovery +# List deleted files +fls -rd -o 2048 /cases/case-2024-001/images/evidence.dd + +# Recover a specific deleted file by inode +icat -o 2048 /cases/case-2024-001/images/evidence.dd 14523 > /cases/case-2024-001/recovered/deleted_document.docx + +# Extract all files from a directory +tsk_recover -o 2048 -d /Users/suspect/Documents \ + /cases/case-2024-001/images/evidence.dd \ + /cases/case-2024-001/recovered/documents/ + +# Get detailed file metadata +istat -o 2048 /cases/case-2024-001/images/evidence.dd 14523 +# Shows: creation, modification, access, MFT change timestamps, size, data runs +``` + +### Step 5: Perform Keyword Searches and Tag Evidence + +``` +In Autopsy: +1. Keyword Search panel > "Ad Hoc Keyword Search" +2. Search terms: credit card patterns, SSN regex, email addresses +3. Example regex for credit cards: \b(?:4[0-9]{12}(?:[0-9]{3})?|5[1-5][0-9]{14})\b +4. Example regex for SSN: \b\d{3}-\d{2}-\d{4}\b +5. Review results > Right-click items > "Add Tag" +6. Create tags: "Evidence-Critical", "Evidence-Supporting", "Requires-Review" +7. 
Add comments to tagged items documenting relevance +``` + +```bash +# Using Sleuth Kit for CLI keyword search +srch_strings -a -o 2048 /cases/case-2024-001/images/evidence.dd | \ + grep -iE '(password|secret|confidential)' > /cases/case-2024-001/keyword_hits.txt + +# Search for specific file signatures +sigfind -o 2048 /cases/case-2024-001/images/evidence.dd 25504446 +# 25504446 = %PDF header signature +``` + +### Step 6: Build Timeline and Generate Reports + +``` +In Autopsy: +1. Timeline viewer: Tools > Timeline +2. Select date range of interest (incident window) +3. Filter by event type: File Created, Modified, Accessed, Web Activity +4. Zoom into suspicious time periods +5. Export timeline events as CSV for external analysis + +Generate Report: +1. Generate Report > HTML Report +2. Select tagged items and data sources to include +3. Configure report sections: file listings, keyword hits, timeline +4. Export to /cases/case-2024-001/reports/ +``` + +```bash +# Using Sleuth Kit mactime for CLI timeline +fls -r -m "/" -o 2048 /cases/case-2024-001/images/evidence.dd > /cases/case-2024-001/bodyfile.txt + +# Generate timeline from bodyfile +mactime -b /cases/case-2024-001/bodyfile.txt -d > /cases/case-2024-001/timeline.csv + +# Filter timeline to specific date range +mactime -b /cases/case-2024-001/bodyfile.txt \ + -d 2024-01-15..2024-01-20 > /cases/case-2024-001/incident_timeline.csv +``` + +## Key Concepts + +| Concept | Description | +|---------|-------------| +| Ingest Modules | Automated analysis plugins that process data sources upon import | +| MFT (Master File Table) | NTFS metadata structure recording all file entries and attributes | +| File carving | Recovering files from unallocated space using file signatures | +| Hash filtering | Using NSRL or custom hash sets to exclude known-good or flag known-bad files | +| Timeline analysis | Chronological reconstruction of file system and user activity events | +| Deleted file recovery | Restoring files whose 
directory entries are removed but data remains | +| Keyword indexing | Full-text search index built from all file content including slack space | +| Artifact extraction | Automated parsing of browser, email, registry, and OS-specific artifacts | + +## Tools & Systems + +| Tool | Purpose | +|------|---------| +| Autopsy | Open-source GUI forensic platform for disk image analysis | +| The Sleuth Kit (TSK) | Command-line forensic toolkit underlying Autopsy | +| fls | List files and directories in a disk image including deleted entries | +| icat | Extract file content by inode number from a disk image | +| mactime | Generate timeline from TSK bodyfile format | +| mmls | Display partition layout of a disk image | +| NSRL | NIST hash database for identifying known software files | +| sigfind | Search for file signatures at the sector level | + +## Common Scenarios + +**Scenario 1: Employee Data Theft Investigation** +Import the employee workstation image, run all ingest modules, search for company-confidential file names and keywords, examine USB connection artifacts in Recent Activity, check for cloud storage client artifacts, review deleted files for evidence of data staging, generate HTML report for legal team. + +**Scenario 2: Malware Infection Forensics** +Add the compromised system image, enable Extension Mismatch and Encryption Detection modules, examine the prefetch directory for execution evidence, search for known malware hashes, build timeline around the infection window, extract suspicious executables for further analysis in a sandbox. + +**Scenario 3: Child Exploitation Material (CSAM) Investigation** +Import image with PhotoDNA and Project VIC hash sets enabled, run Picture Analyzer module, hash all image files against known-bad databases, tag and categorize matches by severity, generate law enforcement report with chain of custody documentation. 
+ +**Scenario 4: Intellectual Property Dispute** +Import multiple employee disk images as separate data sources in one case, perform keyword searches for proprietary terms and project names, compare file hashes between sources, build timeline showing file access and transfer patterns, export evidence for legal review. + +## Output Format + +``` +Autopsy Case Analysis Summary: + Case: CASE-2024-001-Workstation + Image: evidence.dd (500GB NTFS) + Partitions: 2 (System Reserved + Primary) + Total Files: 245,832 + Deleted Files: 12,456 (recoverable: 8,234) + + Ingest Results: + Hash Matches (Known Bad): 3 files + Extension Mismatches: 17 files + Keyword Hits: 234 across 45 files + Encrypted Files: 5 containers detected + EXIF Data Extracted: 1,245 images with metadata + + Tagged Evidence: + Critical: 12 items + Supporting: 34 items + Review: 67 items + + Timeline Events: 1,234,567 entries (filtered to incident window: 892) + Report: /cases/case-2024-001/reports/autopsy_report.html +``` diff --git a/personas/_shared/skills/analyzing-disk-image-with-autopsy/references/api-reference.md b/personas/_shared/skills/analyzing-disk-image-with-autopsy/references/api-reference.md new file mode 100644 index 0000000..5ed560c --- /dev/null +++ b/personas/_shared/skills/analyzing-disk-image-with-autopsy/references/api-reference.md @@ -0,0 +1,118 @@ +# API Reference: Autopsy and The Sleuth Kit (TSK) + +## mmls - Partition Layout + +### Syntax +```bash +mmls +mmls -t dos # Force DOS partition table +mmls -t gpt # Force GPT partition table +``` + +### Output Format +``` +DOS Partition Table +Offset Sector: 0 + Slot Start End Length Description + 00: 00:00 0000002048 0001026047 0001024000 NTFS (0x07) +``` + +## fls - File Listing + +### Syntax +```bash +fls -o # List root directory +fls -r -o # Recursive listing +fls -rd -o # Deleted files only, recursive +fls -m "/" -r -o # Bodyfile format for mactime +``` + +### Flags +| Flag | Description | +|------|-------------| +| `-r` | Recursive 
listing | +| `-d` | Deleted entries only | +| `-D` | Directories only | +| `-m "/"` | Output in bodyfile format with mount point | +| `-o` | Partition sector offset | + +## icat - File Extraction by Inode + +### Syntax +```bash +icat -o > recovered_file +icat -r -o > file # Recover slack space +``` + +## istat - File Metadata + +### Syntax +```bash +istat -o +``` + +### Output Includes +- MFT entry number and sequence +- File creation, modification, access, MFT change timestamps +- File size and data run locations +- Attribute list (NTFS: $STANDARD_INFORMATION, $FILE_NAME, $DATA) + +## mactime - Timeline Generation + +### Syntax +```bash +mactime -b -d > timeline.csv +mactime -b -d 2024-01-15..2024-01-20 > filtered.csv +mactime -b -z UTC -d > timeline_utc.csv +``` + +### Output Columns +``` +Date,Size,Type,Mode,UID,GID,Meta,File Name +``` + +## img_stat - Image Information + +### Syntax +```bash +img_stat +``` + +## sigfind - File Signature Search + +### Syntax +```bash +sigfind -o +sigfind -o 2048 evidence.dd 25504446 # Find %PDF headers +sigfind -o 2048 evidence.dd 504B0304 # Find ZIP/DOCX headers +``` + +### Common Signatures +| Hex | File Type | +|-----|-----------| +| `FFD8FF` | JPEG | +| `89504E47` | PNG | +| `25504446` | PDF | +| `504B0304` | ZIP/DOCX/XLSX | +| `D0CF11E0` | OLE (DOC/XLS) | + +## srch_strings - Keyword Search + +### Syntax +```bash +srch_strings -a -o | grep -i "keyword" +srch_strings -t d # Print offset in decimal +``` + +## Autopsy GUI Ingest Modules + +| Module | Function | +|--------|----------| +| Recent Activity | Browser history, downloads, cookies | +| Hash Lookup | NSRL and known-bad hash matching | +| File Type Identification | Signature-based file type detection | +| Keyword Search | Full-text content indexing | +| Email Parser | PST/MBOX/EML extraction | +| Extension Mismatch | Wrong file extension detection | +| Embedded File Extractor | ZIP, Office, PDF extraction | +| Encryption Detection | Encrypted container identification | 
diff --git a/personas/_shared/skills/analyzing-disk-image-with-autopsy/scripts/agent.py b/personas/_shared/skills/analyzing-disk-image-with-autopsy/scripts/agent.py new file mode 100644 index 0000000..4ad4819 --- /dev/null +++ b/personas/_shared/skills/analyzing-disk-image-with-autopsy/scripts/agent.py @@ -0,0 +1,218 @@ +#!/usr/bin/env python3 +"""Forensic disk image analysis agent using The Sleuth Kit (TSK) command-line tools.""" + +import shlex +import subprocess +import os +import sys +import json +import csv +import datetime + + +def run_cmd(cmd): + """Execute a command and return output.""" + if isinstance(cmd, str): + cmd = shlex.split(cmd) + result = subprocess.run(cmd, capture_output=True, text=True, timeout=120) + return result.stdout.strip(), result.stderr.strip(), result.returncode + + +def get_image_info(image_path): + """Retrieve disk image metadata using img_stat.""" + stdout, _, rc = run_cmd(f"img_stat {image_path}") + if rc == 0: + info = {} + for line in stdout.splitlines(): + if ":" in line: + key, _, val = line.partition(":") + info[key.strip()] = val.strip() + return info + return None + + +def list_partitions(image_path): + """List partition layout using mmls.""" + stdout, _, rc = run_cmd(f"mmls {image_path}") + partitions = [] + if rc == 0: + for line in stdout.splitlines(): + parts = line.split() + if len(parts) >= 6 and parts[2].isdigit(): + partitions.append({ + "slot": parts[0].rstrip(":"), + "start": int(parts[2]), + "end": int(parts[3]), + "length": int(parts[4]), + "description": " ".join(parts[5:]), + }) + return partitions + + +def list_files(image_path, offset, path="/", recursive=False): + """List files in a partition using fls.""" + flags = "-r" if recursive else "" + cmd = f"fls {flags} -o {offset} {image_path}" + if path != "/": + cmd += f" -D {path}" + stdout, _, rc = run_cmd(cmd) + files = [] + if rc == 0: + for line in stdout.splitlines(): + line = line.strip() + if not line: + continue + parts = line.split("\t", 1) + if 
len(parts) == 2: + meta = parts[0].strip() + name = parts[1].strip() + deleted = meta.startswith("*") + file_type = "d" if "d/" in meta else "r" + inode = "" + for token in meta.split(): + if "-" in token and token.replace("-", "").isdigit(): + inode = token + break + files.append({ + "name": name, + "inode": inode, + "type": "directory" if file_type == "d" else "file", + "deleted": deleted, + }) + return files + + +def list_deleted_files(image_path, offset): + """List only deleted files using fls -rd.""" + stdout, _, rc = run_cmd(f"fls -rd -o {offset} {image_path}") + deleted = [] + if rc == 0: + for line in stdout.splitlines(): + line = line.strip() + if line: + deleted.append(line) + return deleted + + +def recover_file(image_path, offset, inode, output_path): + """Recover a file by inode using icat.""" + result = subprocess.run( + ["icat", "-o", str(offset), image_path, str(inode)], + capture_output=True, + timeout=120, + ) + if result.returncode == 0: + with open(output_path, "wb") as f: + f.write(result.stdout) + return result.returncode == 0 + + +def get_file_metadata(image_path, offset, inode): + """Get detailed file metadata using istat.""" + stdout, _, rc = run_cmd(f"istat -o {offset} {image_path} {inode}") + return stdout if rc == 0 else None + + +def create_bodyfile(image_path, offset, output_path): + """Generate a TSK bodyfile for timeline creation.""" + result = subprocess.run( + ["fls", "-r", "-m", "/", "-o", str(offset), image_path], + capture_output=True, text=True, + timeout=120, + ) + if result.returncode == 0: + with open(output_path, "w") as f: + f.write(result.stdout) + return result.returncode == 0 + + +def generate_timeline(bodyfile_path, output_csv, start_date=None, end_date=None): + """Generate a timeline from a bodyfile using mactime.""" + cmd = ["mactime", "-b", bodyfile_path, "-d"] + if start_date and end_date: + cmd.append(f"{start_date}..{end_date}") + result = subprocess.run(cmd, capture_output=True, text=True, timeout=120) + if 
result.returncode == 0: + with open(output_csv, "w") as f: + f.write(result.stdout) + return result.returncode == 0 + + +def search_keywords(image_path, offset, keyword): + """Search for keyword strings in the disk image.""" + result = subprocess.run( + ["srch_strings", "-a", "-o", str(offset), image_path], + capture_output=True, text=True, + timeout=120, + ) + if result.returncode != 0 or not result.stdout: + return [] + keyword_lower = keyword.lower() + return [line for line in result.stdout.splitlines() if keyword_lower in line.lower()] + + +def find_file_signature(image_path, offset, hex_signature): + """Find file signatures at the sector level using sigfind.""" + stdout, _, rc = run_cmd(f"sigfind -o {offset} {image_path} {hex_signature}") + return stdout if rc == 0 else None + + +def analyze_image(image_path, case_dir): + """Run a full automated analysis workflow on a disk image.""" + os.makedirs(case_dir, exist_ok=True) + results = {"image": image_path, "timestamp": datetime.datetime.utcnow().isoformat()} + + print(f"[*] Image info...") + results["image_info"] = get_image_info(image_path) + + print(f"[*] Partition layout...") + partitions = list_partitions(image_path) + results["partitions"] = partitions + + for part in partitions: + if "NTFS" in part.get("description", "") or "Linux" in part.get("description", ""): + offset = part["start"] + print(f"[*] Listing files at offset {offset} ({part['description']})...") + files = list_files(image_path, offset, recursive=True) + results[f"files_offset_{offset}"] = { + "total": len(files), + "deleted": sum(1 for f in files if f["deleted"]), + } + print(f" Total: {len(files)}, Deleted: {results[f'files_offset_{offset}']['deleted']}") + + print(f"[*] Creating bodyfile for timeline...") + bf_path = os.path.join(case_dir, f"bodyfile_{offset}.txt") + create_bodyfile(image_path, offset, bf_path) + + tl_path = os.path.join(case_dir, f"timeline_{offset}.csv") + generate_timeline(bf_path, tl_path) + + report_path = 
os.path.join(case_dir, "analysis_summary.json") + with open(report_path, "w") as f: + json.dump(results, f, indent=2, default=str) + print(f"[*] Summary saved to {report_path}") + return results + + +if __name__ == "__main__": + print("=" * 60) + print("Disk Image Forensic Analysis Agent") + print("Tools: The Sleuth Kit (fls, icat, mmls, mactime)") + print("=" * 60) + + if len(sys.argv) > 1: + image = sys.argv[1] + import tempfile + case = sys.argv[2] if len(sys.argv) > 2 else os.environ.get("AUTOPSY_CASE_DIR", os.path.join(tempfile.gettempdir(), "autopsy_case")) + if os.path.exists(image): + analyze_image(image, case) + else: + print(f"[ERROR] Image not found: {image}") + else: + print("\n[DEMO] Usage: python agent.py [case_directory]") + print("[*] Supported operations:") + print(" - Partition enumeration (mmls)") + print(" - File listing with deleted file recovery (fls, icat)") + print(" - Timeline generation (mactime)") + print(" - Keyword searching (srch_strings)") + print(" - File signature detection (sigfind)") diff --git a/personas/_shared/skills/analyzing-dns-logs-for-exfiltration/LICENSE b/personas/_shared/skills/analyzing-dns-logs-for-exfiltration/LICENSE new file mode 100644 index 0000000..d885118 --- /dev/null +++ b/personas/_shared/skills/analyzing-dns-logs-for-exfiltration/LICENSE @@ -0,0 +1,201 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to the Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by the Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding any notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. Please do not remove or change + the license header comment from a contributed file except when + necessary. 
+ + Copyright 2026 mukul975 + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/personas/_shared/skills/analyzing-dns-logs-for-exfiltration/SKILL.md b/personas/_shared/skills/analyzing-dns-logs-for-exfiltration/SKILL.md new file mode 100644 index 0000000..83a0891 --- /dev/null +++ b/personas/_shared/skills/analyzing-dns-logs-for-exfiltration/SKILL.md @@ -0,0 +1,304 @@ +--- +name: analyzing-dns-logs-for-exfiltration +description: 'Analyzes DNS query logs to detect data exfiltration via DNS tunneling, DGA domain communication, and covert + C2 channels using entropy analysis, query volume anomalies, and subdomain length detection in SIEM platforms. Use when SOC + teams need to identify DNS-based threats that bypass traditional network security controls. 
+ + ' +domain: cybersecurity +subdomain: soc-operations +tags: +- soc +- dns +- exfiltration +- dns-tunneling +- dga +- c2-detection +- splunk +- threat-detection +version: '1.0' +author: mahipal +license: Apache-2.0 +atlas_techniques: +- AML.T0024 +- AML.T0056 +- AML.T0086 +nist_csf: +- DE.CM-01 +- DE.AE-02 +- RS.MA-01 +- DE.AE-06 +--- +# Analyzing DNS Logs for Exfiltration + +## When to Use + +Use this skill when: +- SOC teams suspect data exfiltration through DNS tunneling to bypass firewall/proxy controls +- Threat intelligence indicates adversaries using DNS-based C2 channels (e.g., Cobalt Strike DNS beacon) +- UEBA detects anomalous DNS query volumes from specific hosts +- Malware analysis reveals DNS-over-HTTPS (DoH) or DNS tunneling capabilities + +**Do not use** for standard DNS troubleshooting or availability monitoring — this skill focuses on security-relevant DNS abuse detection. + +## Prerequisites + +- DNS query logging enabled (Windows DNS Server, Bind, Infoblox, or Cisco Umbrella) +- DNS logs ingested into SIEM (Splunk with `Stream:DNS`, `dns` sourcetype, or Zeek DNS logs) +- Passive DNS data for historical domain resolution analysis +- Baseline of normal DNS behavior (query volume, domain distribution, TXT record frequency) +- Python with `math` and `collections` libraries for entropy calculation + +## Workflow + +### Step 1: Detect DNS Tunneling via Subdomain Length Analysis + +DNS tunneling encodes data in subdomain labels, creating unusually long queries: + +```spl +index=dns sourcetype="stream:dns" query_type IN ("A", "AAAA", "TXT", "CNAME", "MX") +| eval domain_parts = split(query, ".") +| eval subdomain = mvindex(domain_parts, 0, mvcount(domain_parts)-3) +| eval subdomain_str = mvjoin(subdomain, ".") +| eval subdomain_len = len(subdomain_str) +| eval tld = mvindex(domain_parts, -1) +| eval registered_domain = mvindex(domain_parts, -2).".".tld +| where subdomain_len > 50 +| stats count AS queries, dc(query) AS unique_queries, + 
avg(subdomain_len) AS avg_subdomain_len,
+      max(subdomain_len) AS max_subdomain_len,
+      values(src_ip) AS sources
+  by registered_domain
+| where queries > 20
+| sort - avg_subdomain_len
+| table registered_domain, queries, unique_queries, avg_subdomain_len, max_subdomain_len, sources
+```
+
+### Step 2: Detect High-Entropy Domain Queries (DGA Detection)
+
+Domain Generation Algorithms produce random-looking domains:
+
+```spl
+index=dns sourcetype="stream:dns"
+| eval domain_parts = split(query, ".")
+| eval sld = mvindex(domain_parts, -2)
+| eval sld_len = len(sld)
+| eval char_count = sld_len
+| eval vowels = len(replace(sld, "[^aeiou]", ""))
+| eval consonants = len(replace(sld, "[^bcdfghjklmnpqrstvwxyz]", ""))
+| eval digits = len(replace(sld, "[^0-9]", ""))
+| eval vowel_ratio = if(char_count > 0, vowels / char_count, 0)
+| eval digit_ratio = if(char_count > 0, digits / char_count, 0)
+| where sld_len > 12 AND (vowel_ratio < 0.2 OR digit_ratio > 0.3)
+| stats count AS queries, dc(query) AS unique_domains, values(query) AS sample_domains
+  by src_ip
+| where unique_domains > 10
+| sort - queries
+```
+
+**Python-based Shannon Entropy Calculation for DNS queries:**
+
+```python
+import math
+from collections import Counter
+
+def shannon_entropy(text):
+    """Calculate Shannon entropy of a string"""
+    if not text:
+        return 0
+    counter = Counter(text.lower())
+    length = len(text)
+    entropy = -sum(
+        (count / length) * math.log2(count / length)
+        for count in counter.values()
+    )
+    return round(entropy, 4)
+
+# Test with examples
+normal_domain = "google"  # Low entropy
+dga_domain = "x8kj2m9p4qw7n"  # High entropy
+tunnel_subdomain = "aGVsbG8gd29ybGQ.evil.com"  # Base64 encoded data
+
+print(f"Normal: {shannon_entropy(normal_domain)}")  # ~1.92
+print(f"DGA: {shannon_entropy(dga_domain)}")  # ~3.70
+print(f"Tunnel: {shannon_entropy(tunnel_subdomain)}")  # ~4.00
+
+# Threshold: entropy > 3.5 for subdomain = likely tunneling/DGA
+```
+
+**Splunk implementation of entropy
scoring:** + +```spl +index=dns sourcetype="stream:dns" +| eval domain_parts = split(query, ".") +| eval check_string = mvindex(domain_parts, 0) +| eval check_len = len(check_string) +| where check_len > 8 +| eval chars = split(check_string, "") +| stats count AS total_chars, dc(chars) AS unique_chars by query, src_ip, check_string, check_len +| eval entropy_estimate = log(unique_chars, 2) * (unique_chars / check_len) +| where entropy_estimate > 3.5 +| stats count AS high_entropy_queries, dc(query) AS unique_queries by src_ip +| where high_entropy_queries > 50 +| sort - high_entropy_queries +``` + +### Step 3: Detect Anomalous DNS Query Volume + +Identify hosts generating abnormal DNS traffic: + +```spl +index=dns sourcetype="stream:dns" earliest=-24h +| bin _time span=1h +| stats count AS queries, dc(query) AS unique_domains by src_ip, _time +| eventstats avg(queries) AS avg_queries, stdev(queries) AS stdev_queries by src_ip +| eval z_score = (queries - avg_queries) / stdev_queries +| where z_score > 3 OR queries > 5000 +| sort - z_score +| table _time, src_ip, queries, unique_domains, avg_queries, z_score +``` + +**Detect TXT record abuse (common tunneling method):** + +```spl +index=dns sourcetype="stream:dns" query_type="TXT" +| stats count AS txt_queries, dc(query) AS unique_txt_domains, + values(query) AS domains by src_ip +| where txt_queries > 100 +| eval suspicion = case( + txt_queries > 1000, "CRITICAL — Likely DNS tunneling", + txt_queries > 500, "HIGH — Possible DNS tunneling", + txt_queries > 100, "MEDIUM — Unusual TXT volume" + ) +| sort - txt_queries +| table src_ip, txt_queries, unique_txt_domains, suspicion +``` + +### Step 4: Detect Known DNS Tunneling Tools + +Search for signatures of common DNS tunneling tools: + +```spl +index=dns sourcetype="stream:dns" +| eval query_lower = lower(query) +| where ( + match(query_lower, "\.dnscat\.") OR + match(query_lower, "\.dns2tcp\.") OR + match(query_lower, "\.iodine\.") OR + match(query_lower, 
"\.dnscapy\.") OR
+    match(query_lower, "\.cobalt.*\.beacon") OR
+    query_type="NULL" OR
+    (query_type="TXT" AND len(query) > 100)
+  )
+| stats count by src_ip, query, query_type
+| sort - count
+```
+
+**Detect DNS over HTTPS (DoH) bypassing local DNS:**
+
+```spl
+index=proxy OR index=firewall
+dest IN ("1.1.1.1", "1.0.0.1", "8.8.8.8", "8.8.4.4",
+         "9.9.9.9", "149.112.112.112", "208.67.222.222")
+dest_port=443
+| stats sum(bytes_out) AS total_bytes, count AS connections by src_ip, dest
+| where connections > 100 OR total_bytes > 10485760
+| eval alert = "Possible DoH bypass — DNS queries sent over HTTPS to public resolver"
+| sort - total_bytes
+```
+
+### Step 5: Correlate DNS Findings with Endpoint Data
+
+Cross-reference suspicious DNS with process data:
+
+```spl
+index=dns src_ip="192.168.1.105" query="*.evil-tunnel.com" earliest=-24h
+| stats count AS dns_queries, earliest(_time) AS first_query, latest(_time) AS last_query
+  by src_ip, query
+| join src_ip [
+    search index=sysmon EventCode=3 DestinationPort=53 Computer="WORKSTATION-042"
+    | stats count AS connections, values(Image) AS processes by SourceIp
+    | rename SourceIp AS src_ip
+  ]
+| table src_ip, query, dns_queries, first_query, last_query, processes
+```
+
+### Step 6: Calculate Data Exfiltration Volume Estimate
+
+Estimate data volume encoded in DNS queries (the 0.75 factor accounts for Base64 decoding overhead):
+
+```spl
+index=dns src_ip="192.168.1.105" query="*.evil-tunnel.com" earliest=-24h
+| eval domain_parts = split(query, ".")
+| eval encoded_data = mvindex(domain_parts, 0)
+| eval encoded_bytes = len(encoded_data)
+| eval decoded_bytes = encoded_bytes * 0.75
+| stats sum(decoded_bytes) AS total_bytes_estimated, count AS total_queries,
+    earliest(_time) AS first_seen, latest(_time) AS last_seen
+| eval estimated_kb = round(total_bytes_estimated / 1024, 1)
+| eval estimated_mb = round(total_bytes_estimated / 1048576, 2)
+| eval duration_hours = round((last_seen - first_seen) / 3600, 1)
+| eval rate_kbps =
round(estimated_kb / (duration_hours * 3600) * 8, 2) +| table total_queries, estimated_mb, duration_hours, rate_kbps, first_seen, last_seen +``` + +## Key Concepts + +| Term | Definition | +|------|-----------| +| **DNS Tunneling** | Technique encoding data within DNS queries/responses to exfiltrate data or establish C2 channels through DNS | +| **DGA** | Domain Generation Algorithm — malware technique generating pseudo-random domain names for C2 resilience | +| **Shannon Entropy** | Mathematical measure of randomness in a string — high entropy (>3.5) in domain names indicates DGA or tunneling | +| **TXT Record Abuse** | Using DNS TXT records (designed for text data) as a high-bandwidth channel for data tunneling | +| **DNS over HTTPS (DoH)** | DNS queries encrypted over HTTPS (port 443), bypassing traditional DNS monitoring | +| **Passive DNS** | Historical record of DNS resolutions showing which IPs a domain resolved to over time | + +## Tools & Systems + +- **Splunk Stream**: Network traffic capture add-on providing parsed DNS query data for SIEM analysis +- **Zeek (Bro)**: Network security monitor generating detailed DNS transaction logs for analysis +- **Cisco Umbrella (OpenDNS)**: Cloud DNS security platform blocking malicious domains and logging query data +- **Infoblox DNS Firewall**: DNS-layer security providing RPZ-based blocking and detailed query logging +- **Farsight DNSDB**: Passive DNS database for historical domain resolution lookups and infrastructure mapping + +## Common Scenarios + +- **Cobalt Strike DNS Beacon**: Detect periodic TXT queries with encoded payloads to C2 domain +- **Data Exfiltration**: Large volumes of unique subdomain queries encoding stolen data in Base64/hex +- **DGA Malware**: Detect DNS queries to algorithmically generated domains (high entropy, no web content) +- **DNS-over-HTTPS Bypass**: Employee using DoH to bypass corporate DNS filtering and monitoring +- **Slow Drip Exfiltration**: Low-volume DNS tunneling staying below 
threshold alerts (requires baseline comparison) + +## Output Format + +``` +DNS EXFILTRATION ANALYSIS — WORKSTATION-042 +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +Period: 2024-03-14 to 2024-03-15 +Source: 192.168.1.105 (WORKSTATION-042, Finance Dept) + +Findings: + [CRITICAL] DNS tunneling detected to evil-tunnel[.]com + Query Volume: 12,847 queries in 18 hours + Avg Subdomain Len: 63 characters (normal: <20) + Avg Entropy: 3.82 (threshold: 3.5) + Query Types: TXT (89%), A (11%) + Estimated Data: ~4.7 MB exfiltrated via DNS + Rate: 0.58 kbps (slow drip pattern) + + [HIGH] DGA-like domains resolved + Unique DGA Domains: 247 domains resolved + Pattern: 15-char random alphanumeric.xyz TLD + Entropy Range: 3.6 - 4.1 + +Process Attribution: + Process: svchost_update.exe (masquerading — not legitimate svchost) + PID: 4892 + Parent: explorer.exe + Hash: SHA256: a1b2c3d4... (VT: 34/72 malicious — Cobalt Strike beacon) + +Containment: + [DONE] Host isolated via EDR + [DONE] Domain evil-tunnel[.]com added to DNS sinkhole + [DONE] Incident IR-2024-0448 created +``` diff --git a/personas/_shared/skills/analyzing-dns-logs-for-exfiltration/references/api-reference.md b/personas/_shared/skills/analyzing-dns-logs-for-exfiltration/references/api-reference.md new file mode 100644 index 0000000..f6ea940 --- /dev/null +++ b/personas/_shared/skills/analyzing-dns-logs-for-exfiltration/references/api-reference.md @@ -0,0 +1,112 @@ +# API Reference: DNS Exfiltration Detection Tools + +## Shannon Entropy Calculation + +### Python Implementation +```python +import math +from collections import Counter + +def shannon_entropy(text): + counter = Counter(text.lower()) + length = len(text) + return -sum((c/length) * math.log2(c/length) for c in counter.values()) +``` + +### Threshold Values +| Entropy | Classification | +|---------|---------------| +| < 2.5 | Normal domain (e.g., "google") | +| 2.5 - 3.5 | Borderline (monitor) | +| > 3.5 | Suspicious (likely DGA/tunneling) | +| > 4.0 | High 
confidence malicious | + +## Splunk DNS Queries + +### Tunneling Detection +```spl +index=dns sourcetype="stream:dns" +| eval subdomain_len=len(mvindex(split(query,"."),0)) +| where subdomain_len > 50 +| stats count by registered_domain, src_ip +``` + +### DGA Detection +```spl +index=dns +| eval sld=mvindex(split(query,"."), -2) +| where len(sld) > 12 +| stats count, dc(query) AS unique by src_ip +``` + +### Volume Anomaly +```spl +index=dns earliest=-24h +| bin _time span=1h +| stats count AS queries by src_ip, _time +| eventstats avg(queries) AS avg_q, stdev(queries) AS stdev_q by src_ip +| eval z_score=(queries - avg_q) / stdev_q +| where z_score > 3 +``` + +### TXT Record Abuse +```spl +index=dns query_type="TXT" +| stats count AS txt_queries by src_ip +| where txt_queries > 100 +``` + +## Zeek DNS Log Format + +### Log Fields (dns.log) +| Column | Field | Description | +|--------|-------|-------------| +| 0 | ts | Timestamp | +| 2 | id.orig_h | Source IP | +| 4 | id.resp_h | DNS server IP | +| 9 | query | Query domain name | +| 13 | qtype_name | Query type (A, TXT, CNAME) | +| 15 | rcode_name | Response code | +| 21 | answers | Response answers | + +### Zeek CLI Analysis +```bash +cat dns.log | zeek-cut query qtype_name id.orig_h | sort | uniq -c | sort -rn +``` + +## DNS Tunneling Tools (Detection Signatures) + +| Tool | DNS Pattern | +|------|-------------| +| iodine | `*.pirate.sea` (TXT/NULL records) | +| dnscat2 | `*.dnscat.` prefix in queries | +| dns2tcp | `*.dns2tcp.` pattern | +| Cobalt Strike DNS | Periodic TXT queries with encoded payloads | + +## Passive DNS Lookup APIs + +### Farsight DNSDB +```bash +curl -H "X-API-Key: $KEY" \ + "https://api.dnsdb.info/dnsdb/v2/lookup/rrset/name/evil.com/A" +``` + +### VirusTotal Domain Resolutions +```bash +curl -H "x-apikey: $KEY" \ + "https://www.virustotal.com/api/v3/domains/evil.com/resolutions" +``` + +## Cisco Umbrella (OpenDNS) Investigate API + +### Domain Categorization +```bash +curl -H 
"Authorization: Bearer $TOKEN" \ + "https://investigate.api.umbrella.com/domains/categorization/evil.com" +``` + +### Security Information +```bash +curl -H "Authorization: Bearer $TOKEN" \ + "https://investigate.api.umbrella.com/security/name/evil.com" +``` diff --git a/personas/_shared/skills/analyzing-dns-logs-for-exfiltration/scripts/agent.py b/personas/_shared/skills/analyzing-dns-logs-for-exfiltration/scripts/agent.py new file mode 100644 index 0000000..4b9a61d --- /dev/null +++ b/personas/_shared/skills/analyzing-dns-logs-for-exfiltration/scripts/agent.py @@ -0,0 +1,229 @@ +#!/usr/bin/env python3 +"""DNS exfiltration detection agent using entropy analysis and query pattern detection.""" + +import math +from collections import Counter, defaultdict + + +def shannon_entropy(text): + """Calculate Shannon entropy of a string.""" + if not text: + return 0.0 + counter = Counter(text.lower()) + length = len(text) + entropy = -sum( + (count / length) * math.log2(count / length) + for count in counter.values() + ) + return round(entropy, 4) + + +def extract_subdomain(fqdn): + """Extract the subdomain portion from a fully qualified domain name.""" + parts = fqdn.rstrip(".").split(".") + if len(parts) > 2: + return ".".join(parts[:-2]) + return "" + + +def extract_registered_domain(fqdn): + """Extract the registered domain (SLD + TLD) from an FQDN.""" + parts = fqdn.rstrip(".").split(".") + if len(parts) >= 2: + return ".".join(parts[-2:]) + return fqdn + + +def detect_tunneling(dns_records, subdomain_len_threshold=50, min_queries=20): + """Detect DNS tunneling based on subdomain length anomalies.""" + domain_stats = defaultdict(lambda: {"queries": 0, "unique_queries": set(), + "subdomain_lengths": [], "sources": set()}) + for record in dns_records: + query = record.get("query", "") + src = record.get("src_ip", "unknown") + subdomain = extract_subdomain(query) + reg_domain = extract_registered_domain(query) + if len(subdomain) > subdomain_len_threshold: + stats = 
domain_stats[reg_domain] + stats["queries"] += 1 + stats["unique_queries"].add(query) + stats["subdomain_lengths"].append(len(subdomain)) + stats["sources"].add(src) + alerts = [] + for domain, stats in domain_stats.items(): + if stats["queries"] >= min_queries: + avg_len = sum(stats["subdomain_lengths"]) / len(stats["subdomain_lengths"]) + max_len = max(stats["subdomain_lengths"]) + alerts.append({ + "domain": domain, + "queries": stats["queries"], + "unique_queries": len(stats["unique_queries"]), + "avg_subdomain_length": round(avg_len, 1), + "max_subdomain_length": max_len, + "sources": list(stats["sources"]), + "verdict": "CRITICAL - Likely DNS tunneling", + }) + return sorted(alerts, key=lambda x: x["avg_subdomain_length"], reverse=True) + + +def detect_dga(dns_records, entropy_threshold=3.5, min_sld_length=12): + """Detect Domain Generation Algorithm queries using entropy scoring.""" + suspicious = defaultdict(lambda: {"count": 0, "sources": set(), "entropies": []}) + for record in dns_records: + query = record.get("query", "").rstrip(".") + src = record.get("src_ip", "unknown") + parts = query.split(".") + if len(parts) < 2: + continue + sld = parts[-2] + if len(sld) < min_sld_length: + continue + ent = shannon_entropy(sld) + if ent > entropy_threshold: + suspicious[query]["count"] += 1 + suspicious[query]["sources"].add(src) + suspicious[query]["entropies"].append(ent) + alerts = [] + for domain, data in suspicious.items(): + avg_entropy = sum(data["entropies"]) / len(data["entropies"]) + alerts.append({ + "domain": domain, + "queries": data["count"], + "avg_entropy": round(avg_entropy, 4), + "sources": list(data["sources"]), + "verdict": "HIGH - Possible DGA domain", + }) + return sorted(alerts, key=lambda x: x["avg_entropy"], reverse=True) + + +def detect_volume_anomaly(dns_records, z_score_threshold=3.0): + """Detect hosts with anomalously high DNS query volumes.""" + host_counts = defaultdict(int) + for record in dns_records: + src = 
record.get("src_ip", "unknown") + host_counts[src] += 1 + if not host_counts: + return [] + values = list(host_counts.values()) + mean_q = sum(values) / len(values) + if len(values) < 2: + return [] + variance = sum((x - mean_q) ** 2 for x in values) / (len(values) - 1) + stdev_q = variance ** 0.5 + if stdev_q == 0: + return [] + anomalies = [] + for host, count in host_counts.items(): + z = (count - mean_q) / stdev_q + if z > z_score_threshold: + anomalies.append({ + "src_ip": host, + "queries": count, + "z_score": round(z, 2), + "mean": round(mean_q, 1), + "verdict": "HIGH - Anomalous query volume", + }) + return sorted(anomalies, key=lambda x: x["z_score"], reverse=True) + + +def detect_txt_abuse(dns_records, threshold=100): + """Detect excessive TXT record queries (common tunneling method).""" + txt_counts = defaultdict(lambda: {"count": 0, "unique_domains": set()}) + for record in dns_records: + qtype = str(record.get("query_type", "")).upper() + if qtype in ("TXT", "16"): + src = record.get("src_ip", "unknown") + txt_counts[src]["count"] += 1 + txt_counts[src]["unique_domains"].add(record.get("query", "")) + alerts = [] + for src, data in txt_counts.items(): + if data["count"] > threshold: + level = "CRITICAL" if data["count"] > 1000 else "HIGH" if data["count"] > 500 else "MEDIUM" + alerts.append({ + "src_ip": src, + "txt_queries": data["count"], + "unique_domains": len(data["unique_domains"]), + "verdict": f"{level} - Possible DNS tunneling via TXT records", + }) + return sorted(alerts, key=lambda x: x["txt_queries"], reverse=True) + + +def estimate_exfil_volume(dns_records, target_domain): + """Estimate data volume encoded in DNS queries to a specific domain.""" + total_encoded_bytes = 0 + query_count = 0 + for record in dns_records: + query = record.get("query", "") + if target_domain in query: + subdomain = extract_subdomain(query) + total_encoded_bytes += len(subdomain) + query_count += 1 + decoded_bytes = int(total_encoded_bytes * 0.75) # Base64 decode 
factor + return { + "target_domain": target_domain, + "total_queries": query_count, + "encoded_bytes": total_encoded_bytes, + "estimated_decoded_bytes": decoded_bytes, + "estimated_kb": round(decoded_bytes / 1024, 1), + "estimated_mb": round(decoded_bytes / (1024 * 1024), 3), + } + + +def parse_zeek_dns_log(log_path): + """Parse a Zeek dns.log file into structured records.""" + records = [] + with open(log_path, "r") as f: + for line in f: + if line.startswith("#"): + continue + parts = line.strip().split("\t") + if len(parts) >= 10: + records.append({ + "timestamp": parts[0], + "src_ip": parts[2], + "src_port": parts[3], + "dst_ip": parts[4], + "query": parts[9] if len(parts) > 9 else "", + "query_type": parts[13] if len(parts) > 13 else "", + }) + return records + + +if __name__ == "__main__": + print("=" * 60) + print("DNS Exfiltration Detection Agent") + print("Tunneling, DGA, volume anomaly, and TXT abuse detection") + print("=" * 60) + + # Demo with synthetic DNS records + demo_records = [ + {"query": f"{'a' * 60}.evil-tunnel.com", "src_ip": "192.168.1.105", + "query_type": "TXT"} for _ in range(50) + ] + [ + {"query": "x8kj2m9p4qw7nz3.xyz", "src_ip": "192.168.1.110", + "query_type": "A"} for _ in range(5) + ] + [ + {"query": "google.com", "src_ip": "192.168.1.50", "query_type": "A"} + for _ in range(10) + ] + + print("\n--- DNS Tunneling Detection ---") + tunneling = detect_tunneling(demo_records, subdomain_len_threshold=30, min_queries=10) + for t in tunneling: + print(f"[!] {t['domain']}: {t['queries']} queries, " + f"avg subdomain len={t['avg_subdomain_length']}") + + print("\n--- DGA Detection ---") + dga = detect_dga(demo_records, entropy_threshold=3.0, min_sld_length=10) + for d in dga[:5]: + print(f"[!] {d['domain']}: entropy={d['avg_entropy']}") + + print("\n--- TXT Record Abuse ---") + txt = detect_txt_abuse(demo_records, threshold=10) + for t in txt: + print(f"[!] 
{t['src_ip']}: {t['txt_queries']} TXT queries") + + print("\n--- Entropy Examples ---") + examples = ["google", "x8kj2m9p4qw7n", "aGVsbG8gd29ybGQ"] + for ex in examples: + print(f" '{ex}' -> entropy={shannon_entropy(ex)}") diff --git a/personas/_shared/skills/analyzing-docker-container-forensics/LICENSE b/personas/_shared/skills/analyzing-docker-container-forensics/LICENSE new file mode 100644 index 0000000..d885118 --- /dev/null +++ b/personas/_shared/skills/analyzing-docker-container-forensics/LICENSE @@ -0,0 +1,201 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to the Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by the Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding any notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. Please do not remove or change + the license header comment from a contributed file except when + necessary. 
+ + Copyright 2026 mukul975 + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/personas/_shared/skills/analyzing-docker-container-forensics/SKILL.md b/personas/_shared/skills/analyzing-docker-container-forensics/SKILL.md new file mode 100644 index 0000000..d76d954 --- /dev/null +++ b/personas/_shared/skills/analyzing-docker-container-forensics/SKILL.md @@ -0,0 +1,341 @@ +--- +name: analyzing-docker-container-forensics +description: Investigate compromised Docker containers by analyzing images, layers, volumes, logs, and runtime artifacts to + identify malicious activity and evidence. 
+domain: cybersecurity +subdomain: digital-forensics +tags: +- forensics +- docker +- container-forensics +- container-security +- image-analysis +- runtime-investigation +version: '1.0' +author: mahipal +license: Apache-2.0 +nist_csf: +- RS.AN-01 +- RS.AN-03 +- DE.AE-02 +- RS.MA-01 +--- + +# Analyzing Docker Container Forensics + +## When to Use +- When investigating a compromised Docker container or container host +- For analyzing malicious Docker images pulled from registries +- During incident response involving containerized application breaches +- When examining container escape attempts or privilege escalation +- For auditing container configurations and identifying misconfigurations + +## Prerequisites +- Docker CLI access on the forensic workstation +- Access to the Docker host file system (forensic image or live) +- Understanding of Docker layered file system (overlay2, aufs) +- dive, docker-explorer, or container-diff for image analysis +- Knowledge of Docker daemon configuration and socket security +- Trivy or Grype for vulnerability scanning of container images + +## Workflow + +### Step 1: Preserve Container State and Evidence + +```bash +# List all containers (including stopped) +docker ps -a --no-trunc > /cases/case-2024-001/docker/container_list.txt + +# Inspect the compromised container +CONTAINER_ID="abc123def456" +docker inspect $CONTAINER_ID > /cases/case-2024-001/docker/container_inspect.json + +# Export container filesystem as tarball (preserves current state) +docker export $CONTAINER_ID > /cases/case-2024-001/docker/container_export.tar + +# Create an image from the container's current state +docker commit $CONTAINER_ID forensic-evidence:case-2024-001 +docker save forensic-evidence:case-2024-001 > /cases/case-2024-001/docker/container_image.tar + +# Capture container logs +docker logs $CONTAINER_ID --timestamps > /cases/case-2024-001/docker/container_logs.txt 2>&1 + +# Capture running processes (if container is still running) +docker top 
$CONTAINER_ID > /cases/case-2024-001/docker/container_processes.txt + +# Capture network connections +docker exec $CONTAINER_ID netstat -tlnp 2>/dev/null > /cases/case-2024-001/docker/container_network.txt + +# Copy specific files from the container +docker cp $CONTAINER_ID:/var/log/ /cases/case-2024-001/docker/container_var_log/ +docker cp $CONTAINER_ID:/tmp/ /cases/case-2024-001/docker/container_tmp/ +docker cp $CONTAINER_ID:/etc/passwd /cases/case-2024-001/docker/container_passwd + +# Hash all exported evidence +sha256sum /cases/case-2024-001/docker/*.tar > /cases/case-2024-001/docker/evidence_hashes.txt +``` + +### Step 2: Analyze Container Image Layers + +```bash +# Install dive for image layer analysis +wget https://github.com/wagoodman/dive/releases/latest/download/dive_linux_amd64.deb +sudo dpkg -i dive_linux_amd64.deb + +# Analyze image layers interactively +dive forensic-evidence:case-2024-001 + +# Non-interactive layer analysis +dive forensic-evidence:case-2024-001 --ci --json /cases/case-2024-001/docker/dive_analysis.json + +# Extract and examine individual layers +mkdir -p /cases/case-2024-001/docker/layers/ +tar -xf /cases/case-2024-001/docker/container_image.tar -C /cases/case-2024-001/docker/layers/ + +# List the image manifest and layer order +cat /cases/case-2024-001/docker/layers/manifest.json | python3 -m json.tool + +# Examine each layer for changes +for layer in /cases/case-2024-001/docker/layers/*/layer.tar; do + echo "=== Layer: $(dirname $layer | xargs basename) ===" + tar -tf "$layer" | head -20 + echo "..." 
+done + +# Use container-diff to compare with original base image +# Install container-diff +curl -LO https://storage.googleapis.com/container-diff/latest/container-diff-linux-amd64 +chmod +x container-diff-linux-amd64 + +# Compare committed image with original +./container-diff-linux-amd64 diff daemon://nginx:latest daemon://forensic-evidence:case-2024-001 \ + --type=file --type=apt --type=history --json \ + > /cases/case-2024-001/docker/container_diff.json +``` + +### Step 3: Examine Docker Host Artifacts + +```bash +# Docker data directory (default: /var/lib/docker/) +DOCKER_ROOT="/mnt/evidence/var/lib/docker" + +# Examine overlay2 filesystem layers +ls -la $DOCKER_ROOT/overlay2/ + +# Find the container's merged filesystem +CONTAINER_HASH=$(docker inspect $CONTAINER_ID --format '{{.GraphDriver.Data.MergedDir}}' 2>/dev/null) +# Or manually from forensic image: +# Look in /var/lib/docker/containers//config.v2.json + +# Analyze container configuration files +cat $DOCKER_ROOT/containers/$CONTAINER_ID/config.v2.json | python3 -m json.tool \ + > /cases/case-2024-001/docker/container_config.json + +# Check Docker daemon configuration +cat /mnt/evidence/etc/docker/daemon.json 2>/dev/null > /cases/case-2024-001/docker/daemon_config.json + +# Examine Docker events log +cat $DOCKER_ROOT/containers/$CONTAINER_ID/*.log > /cases/case-2024-001/docker/container_json_logs.txt + +# Check for volume mounts (potential host filesystem access) +python3 << 'PYEOF' +import json + +with open('/cases/case-2024-001/docker/container_inspect.json') as f: + data = json.load(f) + +inspect = data[0] if isinstance(data, list) else data + +print("=== CONTAINER SECURITY ANALYSIS ===\n") + +# Check mounts +print("Volume Mounts:") +for mount in inspect.get('Mounts', []): + rw = "READ-WRITE" if mount.get('RW') else "READ-ONLY" + print(f" {mount.get('Source', 'N/A')} -> {mount.get('Destination', 'N/A')} ({rw})") + if mount.get('Source') in ('/', '/etc', '/var', '/root') and mount.get('RW'): + 
print(f" WARNING: Sensitive host path mounted read-write!") + +# Check privileged mode +host_config = inspect.get('HostConfig', {}) +if host_config.get('Privileged'): + print("\nWARNING: Container was running in PRIVILEGED mode!") + +# Check capabilities +cap_add = host_config.get('CapAdd', []) +if cap_add: + print(f"\nAdded Capabilities: {cap_add}") + dangerous_caps = ['SYS_ADMIN', 'SYS_PTRACE', 'NET_ADMIN', 'SYS_MODULE'] + for cap in cap_add: + if cap in dangerous_caps: + print(f" WARNING: Dangerous capability: {cap}") + +# Check PID namespace +if host_config.get('PidMode') == 'host': + print("\nWARNING: Container shares host PID namespace!") + +# Check network mode +if host_config.get('NetworkMode') == 'host': + print("\nWARNING: Container shares host network namespace!") + +# Check user +user = inspect.get('Config', {}).get('User', 'root (default)') +print(f"\nRunning as user: {user}") + +# Check environment variables for secrets +env_vars = inspect.get('Config', {}).get('Env', []) +print(f"\nEnvironment Variables: {len(env_vars)}") +for env in env_vars: + key = env.split('=')[0] + if any(s in key.upper() for s in ['PASSWORD', 'SECRET', 'KEY', 'TOKEN', 'CREDENTIAL']): + print(f" SENSITIVE: {key}=***REDACTED***") +PYEOF +``` + +### Step 4: Analyze Container File System Changes + +```bash +# Compare container filesystem to original image +docker diff $CONTAINER_ID > /cases/case-2024-001/docker/filesystem_changes.txt + +# A = Added, C = Changed, D = Deleted +# Analyze changes +python3 << 'PYEOF' +added = [] +changed = [] +deleted = [] + +with open('/cases/case-2024-001/docker/filesystem_changes.txt') as f: + for line in f: + line = line.strip() + if line.startswith('A '): + added.append(line[2:]) + elif line.startswith('C '): + changed.append(line[2:]) + elif line.startswith('D '): + deleted.append(line[2:]) + +print(f"Files Added: {len(added)}") +print(f"Files Changed: {len(changed)}") +print(f"Files Deleted: {len(deleted)}") + +# Flag suspicious additions 
+suspicious = [f for f in added if any(s in f for s in + ['/tmp/', '/dev/shm/', '/root/', '.sh', '.py', '.elf', 'reverse', 'shell', 'backdoor'])] +if suspicious: + print(f"\nSuspicious Added Files:") + for f in suspicious: + print(f" {f}") + +# Flag suspicious changes +sus_changed = [f for f in changed if any(s in f for s in + ['/etc/passwd', '/etc/shadow', '/etc/crontab', '/etc/ssh', '.bashrc'])] +if sus_changed: + print(f"\nSuspicious Changed Files:") + for f in sus_changed: + print(f" {f}") +PYEOF + +# Extract and examine the container export +mkdir -p /cases/case-2024-001/docker/container_fs/ +tar -xf /cases/case-2024-001/docker/container_export.tar -C /cases/case-2024-001/docker/container_fs/ + +# Scan for webshells and malicious files +find /cases/case-2024-001/docker/container_fs/tmp/ -type f -exec file {} \; +find /cases/case-2024-001/docker/container_fs/ -name "*.php" -newer /cases/case-2024-001/docker/container_fs/etc/hostname +``` + +### Step 5: Scan for Vulnerabilities and Generate Report + +```bash +# Scan the image for known vulnerabilities +trivy image forensic-evidence:case-2024-001 \ + --format json \ + --output /cases/case-2024-001/docker/vulnerability_scan.json + +# Scan the exported filesystem +trivy fs /cases/case-2024-001/docker/container_fs/ \ + --format table \ + --output /cases/case-2024-001/docker/fs_vulnerabilities.txt + +# Check for secrets in the image +trivy image forensic-evidence:case-2024-001 \ + --scanners secret \ + --format json \ + --output /cases/case-2024-001/docker/secrets_scan.json +``` + +## Key Concepts + +| Concept | Description | +|---------|-------------| +| Image layers | Read-only filesystem layers stacked to form the container image | +| overlay2 | Default Docker storage driver using union filesystem for layers | +| Container diff | Comparison of runtime filesystem changes against the original image | +| Privileged mode | Container with full host capabilities (bypasses most isolation) | +| Docker socket | Unix socket 
(/var/run/docker.sock) controlling the Docker daemon | +| Container escape | Technique for breaking out of container isolation to the host | +| Volume mounts | Host filesystem paths made accessible inside the container | +| Image history | Record of Dockerfile instructions used to build each layer | + +## Tools & Systems + +| Tool | Purpose | +|------|---------| +| docker inspect | Detailed container configuration and state information | +| docker diff | Show filesystem changes made in a running/stopped container | +| dive | Interactive Docker image layer analysis tool | +| container-diff | Google tool for comparing container image contents | +| Trivy | Vulnerability scanner for container images and filesystems | +| docker-explorer | Forensic tool for offline Docker artifact analysis | +| Sysdig | Container runtime security monitoring and forensics | +| Falco | Runtime threat detection for containers and Kubernetes | + +## Common Scenarios + +**Scenario 1: Web Application Container Compromise** +Export the container filesystem, identify webshells in web root, analyze access logs for exploitation attempts, check for added files and modified configurations, examine network connections for C2 communication, review container capabilities for escalation paths. + +**Scenario 2: Supply Chain Attack via Malicious Image** +Analyze image layers with dive to identify which layer added malicious content, compare with the official base image using container-diff, check image history for suspicious RUN commands, scan for embedded backdoors and cryptocurrency miners, trace the image pull from registry logs. + +**Scenario 3: Container Escape Investigation** +Check if container ran privileged or with dangerous capabilities, examine host filesystem mount points for unauthorized access, review Docker socket mount enabling Docker-in-Docker abuse, analyze host system logs for container escape indicators, check for kernel exploit artifacts. 
+ +**Scenario 4: Cryptojacking in Container Environment** +Identify high-CPU containers, export and analyze the container image for mining binaries, check for unauthorized images in the registry, review container creation events for rogue deployments, examine network connections for mining pool communications. + +## Output Format + +``` +Docker Container Forensics Summary: + Container: abc123def456 (nginx-app) + Image: company/web-app:v2.1 + Status: Running (started 2024-01-10 09:00 UTC) + Host: docker-host-01.corp.local + + Security Configuration: + Privileged: No + Capabilities Added: NET_ADMIN (WARNING) + Volume Mounts: /var/log -> /host-logs (RW) + Network Mode: bridge + User: root (WARNING) + + Filesystem Changes: + Added: 23 files (5 suspicious) + Changed: 12 files (2 suspicious) + Deleted: 0 files + + Suspicious Findings: + /tmp/reverse.sh - Reverse shell script (Added) + /var/www/html/.hidden/shell.php - PHP webshell (Added) + /etc/crontab - Modified (persistence cron entry added) + /root/.ssh/authorized_keys - Modified (unauthorized key added) + + Vulnerability Scan: + Critical: 3 (CVE-2024-xxxx in base image) + High: 12 + Medium: 34 + + Evidence: /cases/case-2024-001/docker/ +``` diff --git a/personas/_shared/skills/analyzing-docker-container-forensics/references/api-reference.md b/personas/_shared/skills/analyzing-docker-container-forensics/references/api-reference.md new file mode 100644 index 0000000..aa7c35d --- /dev/null +++ b/personas/_shared/skills/analyzing-docker-container-forensics/references/api-reference.md @@ -0,0 +1,116 @@ +# API Reference: Docker Container Forensics Tools + +## docker inspect - Container Details + +### Syntax +```bash +docker inspect +docker inspect --format '{{.HostConfig.Privileged}}' +docker inspect --format '{{json .Mounts}}' | jq +docker inspect --format '{{.GraphDriver.Data.MergedDir}}' +``` + +### Key JSON Paths +| Path | Description | +|------|-------------| +| `.HostConfig.Privileged` | Privileged mode status | +| 
`.HostConfig.CapAdd` | Added capabilities |
+| `.HostConfig.PidMode` | PID namespace mode |
+| `.HostConfig.NetworkMode` | Network namespace mode |
+| `.Mounts` | Volume mount configuration |
+| `.Config.User` | Container user |
+| `.Config.Env` | Environment variables |
+| `.Config.Image` | Source image name |
+| `.State.StartedAt` | Container start time |
+
+## docker diff - Filesystem Changes
+
+### Syntax
+```bash
+docker diff <container_id>
+```
+
+### Output Codes
+| Code | Meaning |
+|------|---------|
+| `A` | File or directory was added |
+| `C` | File or directory was changed |
+| `D` | File or directory was deleted |
+
+## docker export - Container Filesystem Export
+
+### Syntax
+```bash
+docker export <container_id> > container_fs.tar
+docker export <container_id> | gzip > container_fs.tar.gz
+```
+
+## docker commit / docker save - Image Preservation
+
+### Syntax
+```bash
+docker commit <container_id> forensic-evidence:case001
+docker save forensic-evidence:case001 > evidence_image.tar
+```
+
+## docker logs - Container Log Retrieval
+
+### Syntax
+```bash
+docker logs <container_id> --timestamps
+docker logs <container_id> --since 2024-01-15
+docker logs <container_id> --tail 1000
+docker logs <container_id> -f  # Follow (live)
+```
+
+## dive - Image Layer Analysis
+
+### Syntax
+```bash
+dive <image>                       # Interactive mode
+dive <image> --ci                  # CI mode (non-interactive)
+dive <image> --ci --json out.json  # JSON output
+```
+
+### Output Includes
+- Layer-by-layer filesystem changes
+- Image efficiency score
+- Wasted space analysis
+
+## container-diff - Image Comparison
+
+### Syntax
+```bash
+container-diff diff daemon://nginx:latest daemon://suspect:latest \
+    --type=file --type=apt --type=history --json
+```
+
+### Diff Types
+| Type | Description |
+|------|-------------|
+| `file` | File system differences |
+| `apt` | APT package differences |
+| `pip` | Python package differences |
+| `history` | Docker build history differences |
+
+## Trivy - Vulnerability Scanning
+
+### Syntax
+```bash
+trivy image <image>
+trivy image <image> --format json
+trivy image <image> --scanners vuln,secret
+trivy fs 
/path/to/exported/container/ +``` + +### Severity Levels +`CRITICAL` | `HIGH` | `MEDIUM` | `LOW` | `UNKNOWN` + +## docker-explorer - Offline Forensics + +### Syntax +```bash +de.py -r /var/lib/docker list +de.py -r /var/lib/docker mount /mnt/forensic +de.py -r /var/lib/docker history +``` diff --git a/personas/_shared/skills/analyzing-docker-container-forensics/scripts/agent.py b/personas/_shared/skills/analyzing-docker-container-forensics/scripts/agent.py new file mode 100644 index 0000000..ed525bb --- /dev/null +++ b/personas/_shared/skills/analyzing-docker-container-forensics/scripts/agent.py @@ -0,0 +1,238 @@ +#!/usr/bin/env python3 +"""Docker container forensics agent for investigating compromised containers.""" + +import shlex +import subprocess +import json +import os +import sys +import hashlib +import datetime + + +def run_cmd(cmd): + """Execute a command and return output.""" + if isinstance(cmd, str): + cmd = shlex.split(cmd) + result = subprocess.run(cmd, capture_output=True, text=True, timeout=120) + return result.stdout.strip(), result.stderr.strip(), result.returncode + + +def list_containers(all_containers=True): + """List Docker containers with detailed information.""" + flags = "-a" if all_containers else "" + cmd = f"docker ps {flags} --no-trunc --format '{{{{json .}}}}'" + stdout, _, rc = run_cmd(cmd) + containers = [] + if rc == 0 and stdout: + for line in stdout.splitlines(): + try: + containers.append(json.loads(line)) + except json.JSONDecodeError: + continue + return containers + + +def inspect_container(container_id): + """Get detailed container inspection data.""" + stdout, _, rc = run_cmd(f"docker inspect {container_id}") + if rc == 0 and stdout: + return json.loads(stdout) + return None + + +def analyze_security_config(inspect_data): + """Analyze container security configuration for misconfigurations.""" + if isinstance(inspect_data, list): + inspect_data = inspect_data[0] + findings = [] + host_config = inspect_data.get("HostConfig", 
{}) + config = inspect_data.get("Config", {}) + + if host_config.get("Privileged"): + findings.append({"severity": "CRITICAL", "finding": "Container running in PRIVILEGED mode"}) + + cap_add = host_config.get("CapAdd") or [] + dangerous_caps = ["SYS_ADMIN", "SYS_PTRACE", "NET_ADMIN", "SYS_MODULE", + "DAC_OVERRIDE", "NET_RAW"] + for cap in cap_add: + if cap in dangerous_caps: + findings.append({"severity": "HIGH", "finding": f"Dangerous capability added: {cap}"}) + + if host_config.get("PidMode") == "host": + findings.append({"severity": "HIGH", "finding": "Shares host PID namespace"}) + + if host_config.get("NetworkMode") == "host": + findings.append({"severity": "HIGH", "finding": "Shares host network namespace"}) + + mounts = inspect_data.get("Mounts", []) + sensitive_paths = ["/", "/etc", "/var", "/root", "/home", "/var/run/docker.sock"] + for mount in mounts: + src = mount.get("Source", "") + rw = mount.get("RW", False) + if src in sensitive_paths and rw: + findings.append({ + "severity": "CRITICAL", + "finding": f"Sensitive host path mounted RW: {src} -> {mount.get('Destination')}" + }) + if "docker.sock" in src: + findings.append({ + "severity": "CRITICAL", + "finding": "Docker socket mounted (container can control Docker daemon)" + }) + + user = config.get("User", "") + if not user or user == "root": + findings.append({"severity": "MEDIUM", "finding": "Running as root user"}) + + env_vars = config.get("Env", []) + secret_keywords = ["PASSWORD", "SECRET", "KEY", "TOKEN", "CREDENTIAL", "API_KEY"] + for env in env_vars: + key = env.split("=")[0] + if any(s in key.upper() for s in secret_keywords): + findings.append({"severity": "HIGH", "finding": f"Sensitive env var exposed: {key}"}) + + return findings + + +def get_filesystem_changes(container_id): + """Get filesystem changes between container and its image.""" + stdout, _, rc = run_cmd(f"docker diff {container_id}") + changes = {"added": [], "changed": [], "deleted": []} + if rc == 0 and stdout: + for line in 
stdout.splitlines(): + line = line.strip() + if line.startswith("A "): + changes["added"].append(line[2:]) + elif line.startswith("C "): + changes["changed"].append(line[2:]) + elif line.startswith("D "): + changes["deleted"].append(line[2:]) + return changes + + +def detect_suspicious_files(changes): + """Analyze filesystem changes for indicators of compromise.""" + suspicious_patterns = [ + "/tmp/", "/dev/shm/", "/root/", ".sh", ".py", ".elf", + "reverse", "shell", "backdoor", "miner", "xmr", "nc ", + ".php", "webshell", "c2", "beacon", + ] + suspicious_changes = ["/etc/passwd", "/etc/shadow", "/etc/crontab", + "/etc/ssh", ".bashrc", "/etc/sudoers", "authorized_keys"] + + findings = [] + for f in changes["added"]: + for pattern in suspicious_patterns: + if pattern in f.lower(): + findings.append({"type": "ADDED", "path": f, "reason": f"Matches pattern: {pattern}"}) + break + for f in changes["changed"]: + for pattern in suspicious_changes: + if pattern in f.lower(): + findings.append({"type": "CHANGED", "path": f, "reason": f"Critical file modified"}) + break + return findings + + +def export_container(container_id, output_path): + """Export container filesystem as a tarball for offline analysis.""" + with open(output_path, "wb") as out_f: + result = subprocess.run( + ["docker", "export", container_id], + stdout=out_f, stderr=subprocess.PIPE, + timeout=120, + ) + if result.returncode == 0 and os.path.exists(output_path): + sha256 = hashlib.sha256() + with open(output_path, "rb") as f: + for chunk in iter(lambda: f.read(65536), b""): + sha256.update(chunk) + return True, sha256.hexdigest() + return False, None + + +def get_container_logs(container_id, tail=500): + """Retrieve container logs with timestamps.""" + stdout, stderr, rc = run_cmd(f"docker logs --timestamps --tail {tail} {container_id}") + return stdout + "\n" + stderr if rc == 0 else None + + +def scan_image_vulnerabilities(image_name): + """Run Trivy vulnerability scan on a container image.""" + cmd = 
f"trivy image --format json {image_name}" + stdout, _, rc = run_cmd(cmd) + if rc == 0 and stdout: + try: + return json.loads(stdout) + except json.JSONDecodeError: + return None + return None + + +def generate_report(container_id, inspect_data, security_findings, + fs_changes, suspicious_files): + """Generate a forensic analysis report.""" + container_name = "unknown" + image = "unknown" + if inspect_data: + data = inspect_data[0] if isinstance(inspect_data, list) else inspect_data + container_name = data.get("Name", "").lstrip("/") + image = data.get("Config", {}).get("Image", "unknown") + + report = { + "report_type": "Docker Container Forensics", + "timestamp": datetime.datetime.utcnow().isoformat() + "Z", + "container_id": container_id, + "container_name": container_name, + "image": image, + "security_findings": security_findings, + "filesystem_changes": { + "added": len(fs_changes["added"]), + "changed": len(fs_changes["changed"]), + "deleted": len(fs_changes["deleted"]), + }, + "suspicious_files": suspicious_files, + } + return report + + +if __name__ == "__main__": + print("=" * 60) + print("Docker Container Forensics Agent") + print("Security analysis, filesystem diffing, evidence collection") + print("=" * 60) + + container_id = sys.argv[1] if len(sys.argv) > 1 else None + + if container_id: + print(f"\n[*] Analyzing container: {container_id}") + + inspect_data = inspect_container(container_id) + if not inspect_data: + print("[ERROR] Failed to inspect container. 
Is Docker running?") + sys.exit(1) + + print("\n--- Security Configuration Analysis ---") + findings = analyze_security_config(inspect_data) + for f in findings: + print(f"[{f['severity']}] {f['finding']}") + + print("\n--- Filesystem Changes ---") + changes = get_filesystem_changes(container_id) + print(f" Added: {len(changes['added'])}, Changed: {len(changes['changed'])}, " + f"Deleted: {len(changes['deleted'])}") + + print("\n--- Suspicious Files ---") + suspicious = detect_suspicious_files(changes) + for s in suspicious: + print(f"[!] {s['type']}: {s['path']} ({s['reason']})") + + report = generate_report(container_id, inspect_data, findings, changes, suspicious) + print(f"\n[*] Report:\n{json.dumps(report, indent=2)}") + else: + print("\n[*] Listing all containers...") + containers = list_containers() + for c in containers: + print(f" {c.get('ID', '?')[:12]} {c.get('Names', '?')} {c.get('Status', '?')}") + print(f"\n[DEMO] Usage: python agent.py ") diff --git a/personas/_shared/skills/analyzing-email-headers-for-phishing-investigation/LICENSE b/personas/_shared/skills/analyzing-email-headers-for-phishing-investigation/LICENSE new file mode 100644 index 0000000..d885118 --- /dev/null +++ b/personas/_shared/skills/analyzing-email-headers-for-phishing-investigation/LICENSE @@ -0,0 +1,201 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to the Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by the Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding any notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. Please do not remove or change + the license header comment from a contributed file except when + necessary. 
+ + Copyright 2026 mukul975 + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/personas/_shared/skills/analyzing-email-headers-for-phishing-investigation/SKILL.md b/personas/_shared/skills/analyzing-email-headers-for-phishing-investigation/SKILL.md new file mode 100644 index 0000000..32b98c2 --- /dev/null +++ b/personas/_shared/skills/analyzing-email-headers-for-phishing-investigation/SKILL.md @@ -0,0 +1,327 @@ +--- +name: analyzing-email-headers-for-phishing-investigation +description: Parse and analyze email headers to trace the origin of phishing emails, verify sender authenticity, and identify + spoofing through SPF, DKIM, and DMARC validation. 
+domain: cybersecurity +subdomain: digital-forensics +tags: +- forensics +- email-analysis +- phishing +- spf +- dkim +- dmarc +- header-analysis +version: '1.0' +author: mahipal +license: Apache-2.0 +atlas_techniques: +- AML.T0052 +nist_csf: +- RS.AN-01 +- RS.AN-03 +- DE.AE-02 +- RS.MA-01 +--- + +# Analyzing Email Headers for Phishing Investigation + +## When to Use +- When investigating a suspected phishing email to determine its true origin +- For verifying sender authenticity and detecting email spoofing +- During incident response when a user has clicked a phishing link +- When tracing the delivery path and relay servers of a suspicious email +- For validating SPF, DKIM, and DMARC alignment to identify forgery + +## Prerequisites +- Raw email headers from the suspicious message (EML or MSG format) +- Understanding of SMTP protocol and email header fields +- Access to DNS lookup tools (dig, nslookup) for SPF/DKIM/DMARC verification +- Email header analysis tools (MHA, emailheaders.net concepts) +- Python with email parsing libraries for automated analysis +- Access to threat intelligence platforms for IP/domain reputation + +## Workflow + +### Step 1: Extract Raw Email Headers + +```bash +# Export from Outlook: Open email > File > Properties > Internet Headers +# Export from Gmail: Open email > Three dots > Show original +# Export from Thunderbird: View > Message Source + +# If working with EML file from forensic image +cp /mnt/evidence/Users/suspect/AppData/Local/Microsoft/Outlook/phishing_email.eml \ + /cases/case-2024-001/email/ + +# If working with PST file, extract individual messages +pip install pypff +python3 << 'PYEOF' +import pypff + +pst = pypff.file() +pst.open("/cases/case-2024-001/email/outlook.pst") +root = pst.get_root_folder() + +def extract_messages(folder, path=""): + for i in range(folder.get_number_of_sub_messages()): + msg = folder.get_sub_message(i) + headers = msg.get_transport_headers() + subject = msg.get_subject() + if headers: + 
filename = f"/cases/case-2024-001/email/msg_{i}_{subject[:30]}.txt" + with open(filename, 'w') as f: + f.write(headers) + for i in range(folder.get_number_of_sub_folders()): + extract_messages(folder.get_sub_folder(i)) + +extract_messages(root) +PYEOF +``` + +### Step 2: Parse the Email Header Chain + +```bash +# Parse headers using Python email library +python3 << 'PYEOF' +import email +from email import policy + +with open('/cases/case-2024-001/email/phishing_email.eml', 'r') as f: + msg = email.message_from_file(f, policy=policy.default) + +print("=== KEY HEADER FIELDS ===") +print(f"From: {msg['From']}") +print(f"To: {msg['To']}") +print(f"Subject: {msg['Subject']}") +print(f"Date: {msg['Date']}") +print(f"Message-ID: {msg['Message-ID']}") +print(f"Reply-To: {msg['Reply-To']}") +print(f"Return-Path: {msg['Return-Path']}") +print(f"X-Mailer: {msg['X-Mailer']}") +print(f"X-Originating-IP: {msg['X-Originating-IP']}") + +print("\n=== RECEIVED HEADERS (bottom-up = chronological) ===") +received_headers = msg.get_all('Received') +if received_headers: + for i, header in enumerate(reversed(received_headers)): + print(f"\nHop {i+1}: {header.strip()}") + +print("\n=== AUTHENTICATION RESULTS ===") +auth_results = msg.get_all('Authentication-Results') +if auth_results: + for result in auth_results: + print(result) + +print(f"\nARC-Authentication-Results: {msg.get('ARC-Authentication-Results', 'Not present')}") +print(f"Received-SPF: {msg.get('Received-SPF', 'Not present')}") +print(f"DKIM-Signature: {msg.get('DKIM-Signature', 'Not present')}") +PYEOF +``` + +### Step 3: Validate SPF, DKIM, and DMARC Records + +```bash +# Extract the envelope sender domain +SENDER_DOMAIN="example-corp.com" + +# Check SPF record +dig TXT $SENDER_DOMAIN +short | grep "v=spf1" +# Example: "v=spf1 include:_spf.google.com include:sendgrid.net ~all" + +# Check DKIM record (selector from DKIM-Signature header, e.g., "s=selector1") +DKIM_SELECTOR="selector1" +dig TXT 
${DKIM_SELECTOR}._domainkey.${SENDER_DOMAIN} +short + +# Check DMARC record +dig TXT _dmarc.${SENDER_DOMAIN} +short +# Example: "v=DMARC1; p=reject; rua=mailto:dmarc@example-corp.com; pct=100" + +# Verify the sending IP against SPF +# Extract IP from first Received header +SENDING_IP="203.0.113.45" + +# Manual SPF check using python +python3 << 'PYEOF' +import spf # pip install pyspf + +result, explanation = spf.check2( + i='203.0.113.45', + s='sender@example-corp.com', + h='mail.example-corp.com' +) +print(f"SPF Result: {result}") +print(f"Explanation: {explanation}") +# Results: pass, fail, softfail, neutral, none, temperror, permerror +PYEOF + +# Check if sending IP is in known malicious IP lists +# Query AbuseIPDB or VirusTotal +curl -s "https://api.abuseipdb.com/api/v2/check?ipAddress=${SENDING_IP}" \ + -H "Key: YOUR_API_KEY" -H "Accept: application/json" | python3 -m json.tool +``` + +### Step 4: Analyze Sender Domain and Infrastructure + +```bash +# WHOIS lookup on sender domain +whois $SENDER_DOMAIN | grep -iE '(registrar|creation|expiration|registrant|nameserver)' + +# Check domain age (recently registered domains are suspicious) +# DNS record investigation +dig A $SENDER_DOMAIN +short +dig MX $SENDER_DOMAIN +short +dig NS $SENDER_DOMAIN +short + +# Reverse DNS on sending IP +dig -x $SENDING_IP +short + +# Check for lookalike/typosquatting domains +# Compare with legitimate domain using visual similarity +python3 << 'PYEOF' +import Levenshtein # pip install python-Levenshtein + +legitimate = "microsoft.com" +suspicious = "micr0soft.com" + +distance = Levenshtein.distance(legitimate, suspicious) +ratio = Levenshtein.ratio(legitimate, suspicious) +print(f"Edit distance: {distance}") +print(f"Similarity ratio: {ratio:.2%}") +if ratio > 0.8: + print("WARNING: Likely typosquatting/lookalike domain!") +PYEOF + +# Check domain reputation on VirusTotal +curl -s "https://www.virustotal.com/api/v3/domains/${SENDER_DOMAIN}" \ + -H "x-apikey: YOUR_VT_API_KEY" | 
python3 -m json.tool
+
+# Check if the Reply-To differs from From (common phishing indicator)
+python3 -c "
+import email
+with open('/cases/case-2024-001/email/phishing_email.eml') as f:
+    msg = email.message_from_file(f)
+from_addr = email.utils.parseaddr(msg['From'])[1]
+reply_to = email.utils.parseaddr(msg.get('Reply-To', msg['From']))[1]
+if from_addr != reply_to:
+    print(f'WARNING: From ({from_addr}) != Reply-To ({reply_to})')
+else:
+    print('From and Reply-To match')
+"
+```
+
+### Step 5: Examine Email Body and Attachments
+
+```bash
+# Extract URLs from email body
+python3 << 'PYEOF'
+import email
+import re
+from email import policy
+
+with open('/cases/case-2024-001/email/phishing_email.eml', 'r') as f:
+    msg = email.message_from_file(f, policy=policy.default)
+
+body = msg.get_body(preferencelist=('html', 'plain'))
+if body:
+    content = body.get_content()
+    urls = re.findall(r'https?://[^\s<>"\']+', content)
+    print("=== URLs FOUND IN EMAIL BODY ===")
+    for url in set(urls):
+        print(f"  {url}")
+
+    # Check for URL obfuscation (display text != href)
+    href_pattern = re.findall(r'<a[^>]*href=["\']([^"\']+)["\'][^>]*>(.*?)</a>', content, re.DOTALL)
+    print("\n=== HYPERLINK ANALYSIS ===")
+    for href, text in href_pattern:
+        display_url = re.findall(r'https?://[^\s<]+', text)
+        if display_url and display_url[0] != href:
+            print(f"  MISMATCH: Display='{display_url[0]}' -> Actual='{href}'")
+
+# Extract and hash attachments
+print("\n=== ATTACHMENTS ===")
+for part in msg.walk():
+    if part.get_content_disposition() == 'attachment':
+        filename = part.get_filename()
+        content = part.get_payload(decode=True)
+        import hashlib
+        sha256 = hashlib.sha256(content).hexdigest()
+        print(f"  File: {filename}, Size: {len(content)}, SHA-256: {sha256}")
+        with open(f'/cases/case-2024-001/email/attachments/{filename}', 'wb') as af:
+            af.write(content)
+PYEOF
+
+# Submit attachment hashes to VirusTotal
+# Submit URLs to URLhaus or PhishTank for reputation check
+```
+
+## Key Concepts
+
+| Concept | Description | +|---------|-------------| +| SPF (Sender Policy Framework) | DNS record specifying authorized mail servers for a domain | +| DKIM (DomainKeys Identified Mail) | Cryptographic signature verifying email content integrity | +| DMARC | Policy framework combining SPF and DKIM for sender authentication | +| Received headers | Server-added headers showing each hop in the delivery chain (read bottom to top) | +| Return-Path | Envelope sender address used for bounce messages; may differ from From | +| Message-ID | Unique identifier assigned by the originating mail server | +| X-Originating-IP | Original sender IP address (added by some mail services) | +| Header forgery | Attackers can forge From, Reply-To, and other headers but not Received chains | + +## Tools & Systems + +| Tool | Purpose | +|------|---------| +| MXToolbox | Online email header analyzer and DNS lookup | +| dig/nslookup | DNS record queries for SPF, DKIM, DMARC verification | +| pyspf | Python SPF record validation library | +| dkimpy | Python DKIM signature verification library | +| PhishTool | Specialized phishing email analysis platform | +| VirusTotal | URL and file reputation checking service | +| AbuseIPDB | IP address reputation database | +| whois | Domain registration information lookup | + +## Common Scenarios + +**Scenario 1: CEO Fraud / Business Email Compromise** +The email claims to be from the CEO but Reply-To points to a Gmail address, SPF fails because the sending IP is not authorized for the spoofed domain, DKIM is missing, and the From domain is a lookalike (ceo-company.com vs company.com). + +**Scenario 2: Credential Harvesting Phishing** +Email contains a link that displays "login.microsoft.com" but href points to a lookalike domain, the attachment is an HTML file containing a fake login page with credential exfiltration JavaScript, the sending domain was registered 3 days ago. 
+ +**Scenario 3: Malware Delivery via Attachment** +Email with an Office document attachment containing macros, the sender domain passes SPF but the account was compromised, DKIM signature is valid (sent from legitimate infrastructure), attachment SHA-256 matches known malware on VirusTotal. + +**Scenario 4: Spear Phishing with Legitimate Service** +Attacker uses a legitimate email marketing service to send phishing, SPF and DKIM pass because the service is authorized, the phishing is in the content not the infrastructure, requires URL and content analysis rather than header authentication checks. + +## Output Format + +``` +Email Header Analysis Report: + Subject: "Urgent: Invoice Payment Required" + From: accounting@examp1e-corp.com (SPOOFED) + Reply-To: payments.urgent@gmail.com (MISMATCH) + Return-Path: + Date: 2024-01-15 09:23:45 UTC + + Delivery Path (4 hops): + Hop 1: mail-server.xyz [203.0.113.45] -> relay1.isp.com + Hop 2: relay1.isp.com -> mx.target-company.com + Hop 3: mx.target-company.com -> internal-filter.target.com + Hop 4: internal-filter.target.com -> mailbox + + Authentication: + SPF: FAIL (203.0.113.45 not authorized for examp1e-corp.com) + DKIM: NONE (no signature present) + DMARC: FAIL (p=none, no enforcement) + + Indicators of Phishing: + - Lookalike domain (examp1e-corp.com vs example-corp.com, 96% similar) + - From/Reply-To mismatch + - Domain registered 2 days before email sent + - URL in body points to credential harvesting page + - Attachment: invoice.xlsm (SHA-256: a3f2...) 
- Known malware on VT + + Risk Level: HIGH +``` diff --git a/personas/_shared/skills/analyzing-email-headers-for-phishing-investigation/references/api-reference.md b/personas/_shared/skills/analyzing-email-headers-for-phishing-investigation/references/api-reference.md new file mode 100644 index 0000000..6a690b5 --- /dev/null +++ b/personas/_shared/skills/analyzing-email-headers-for-phishing-investigation/references/api-reference.md @@ -0,0 +1,121 @@ +# API Reference: Email Header Analysis Tools + +## Python email Module + +### Parsing EML Files +```python +import email +from email import policy + +with open("phishing.eml", "r") as f: + msg = email.message_from_file(f, policy=policy.default) + +msg["From"] # From header +msg["To"] # To header +msg["Subject"] # Subject line +msg["Message-ID"] # Unique message identifier +msg["Reply-To"] # Reply-To address +msg["Return-Path"] # Envelope sender +msg.get_all("Received") # All Received headers (list) +msg.get_all("Authentication-Results") # Auth results +``` + +### Body and Attachment Extraction +```python +body = msg.get_body(preferencelist=("html", "plain")) +content = body.get_content() + +for part in msg.walk(): + if part.get_content_disposition() == "attachment": + filename = part.get_filename() + data = part.get_payload(decode=True) +``` + +## dig - DNS Record Lookup + +### SPF Record +```bash +dig TXT example.com +short +# Output: "v=spf1 include:_spf.google.com ~all" +``` + +### DKIM Record +```bash +dig TXT selector1._domainkey.example.com +short +``` + +### DMARC Record +```bash +dig TXT _dmarc.example.com +short +# Output: "v=DMARC1; p=reject; rua=mailto:dmarc@example.com" +``` + +## pyspf - SPF Validation (Python) + +### Syntax +```python +import spf +result, explanation = spf.check2( + i="203.0.113.45", # Sending IP + s="sender@example.com", # Envelope sender + h="mail.example.com" # HELO hostname +) +# Results: pass, fail, softfail, neutral, none, temperror, permerror +``` + +## dkimpy - DKIM Verification 
(Python) + +### Syntax +```python +import dkim +with open("email.eml", "rb") as f: + message = f.read() +result = dkim.verify(message) +# Returns True/False +``` + +## AbuseIPDB - IP Reputation + +### API Endpoint +```bash +curl -G "https://api.abuseipdb.com/api/v2/check" \ + -H "Key: YOUR_API_KEY" \ + -H "Accept: application/json" \ + -d "ipAddress=203.0.113.45" -d "maxAgeInDays=90" +``` + +### Response Fields +| Field | Description | +|-------|-------------| +| `abuseConfidenceScore` | 0-100 confidence of abuse | +| `totalReports` | Number of abuse reports | +| `countryCode` | Source country | +| `isp` | Internet service provider | + +## VirusTotal - Domain/URL Reputation + +### Domain Lookup +```bash +curl -H "x-apikey: YOUR_KEY" \ + "https://www.virustotal.com/api/v3/domains/suspicious.com" +``` + +### URL Scan +```bash +curl -X POST "https://www.virustotal.com/api/v3/urls" \ + -H "x-apikey: YOUR_KEY" \ + -d "url=http://suspicious-url.com/login" +``` + +## whois - Domain Registration + +### Syntax +```bash +whois suspicious-domain.com +``` + +### Key Fields +- `Registrar` - Domain registrar +- `Creation Date` - When domain was registered +- `Registrant` - Domain owner info +- `Name Server` - Authoritative DNS servers diff --git a/personas/_shared/skills/analyzing-email-headers-for-phishing-investigation/scripts/agent.py b/personas/_shared/skills/analyzing-email-headers-for-phishing-investigation/scripts/agent.py new file mode 100644 index 0000000..9754f32 --- /dev/null +++ b/personas/_shared/skills/analyzing-email-headers-for-phishing-investigation/scripts/agent.py @@ -0,0 +1,229 @@ +#!/usr/bin/env python3 +"""Email header analysis agent for phishing investigation and sender verification.""" + +import email +import email.utils +import re +import hashlib +import os +import sys +import subprocess +from email import policy + + +def parse_email_file(eml_path): + """Parse an EML file and extract key header fields.""" + with open(eml_path, "r", errors="replace") as 
f: + msg = email.message_from_file(f, policy=policy.default) + headers = { + "from": str(msg["From"] or ""), + "to": str(msg["To"] or ""), + "subject": str(msg["Subject"] or ""), + "date": str(msg["Date"] or ""), + "message_id": str(msg["Message-ID"] or ""), + "reply_to": str(msg["Reply-To"] or ""), + "return_path": str(msg["Return-Path"] or ""), + "x_mailer": str(msg["X-Mailer"] or ""), + "x_originating_ip": str(msg["X-Originating-IP"] or ""), + } + return msg, headers + + +def extract_received_chain(msg): + """Extract and parse the Received header chain (bottom-up = chronological).""" + received_headers = msg.get_all("Received") or [] + hops = [] + ip_pattern = re.compile(r"\[?(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})\]?") + for i, header in enumerate(reversed(received_headers)): + ips = ip_pattern.findall(header) + hops.append({ + "hop": i + 1, + "header": header.strip()[:200], + "ips": ips, + }) + return hops + + +def extract_authentication_results(msg): + """Extract SPF, DKIM, and DMARC results from Authentication-Results headers.""" + auth_results = msg.get_all("Authentication-Results") or [] + received_spf = str(msg.get("Received-SPF", "")) + dkim_sig = str(msg.get("DKIM-Signature", "")) + results = { + "spf": "unknown", + "dkim": "unknown", + "dmarc": "unknown", + "raw_authentication_results": [], + "received_spf": received_spf, + "has_dkim_signature": bool(dkim_sig), + } + for ar in auth_results: + results["raw_authentication_results"].append(ar.strip()) + ar_lower = ar.lower() + if "spf=" in ar_lower: + spf_match = re.search(r"spf=(\w+)", ar_lower) + if spf_match: + results["spf"] = spf_match.group(1) + if "dkim=" in ar_lower: + dkim_match = re.search(r"dkim=(\w+)", ar_lower) + if dkim_match: + results["dkim"] = dkim_match.group(1) + if "dmarc=" in ar_lower: + dmarc_match = re.search(r"dmarc=(\w+)", ar_lower) + if dmarc_match: + results["dmarc"] = dmarc_match.group(1) + return results + + +def check_from_replyto_mismatch(headers): + """Detect mismatch between 
From and Reply-To addresses."""
+    from_addr = email.utils.parseaddr(headers["from"])[1].lower()
+    reply_to = headers["reply_to"]
+    if reply_to:
+        reply_addr = email.utils.parseaddr(reply_to)[1].lower()
+        if reply_addr and from_addr != reply_addr:
+            return True, from_addr, reply_addr
+    return False, from_addr, None
+
+
+def extract_urls(msg):
+    """Extract all URLs from the email body."""
+    body = msg.get_body(preferencelist=("html", "plain"))
+    urls = []
+    if body:
+        content = body.get_content()
+        urls = list(set(re.findall(r"https?://[^\s<>\"']+", content)))
+    return urls
+
+
+def detect_url_mismatch(msg):
+    """Detect hyperlinks where display text differs from actual href."""
+    body = msg.get_body(preferencelist=("html",))
+    mismatches = []
+    if body:
+        content = body.get_content()
+        href_pattern = re.findall(
+            r'<a[^>]*href=["\']([^"\']+)["\'][^>]*>(.*?)</a>', content, re.DOTALL
+        )
+        for href, text in href_pattern:
+            display_urls = re.findall(r"https?://[^\s<]+", text)
+            if display_urls:
+                for display_url in display_urls:
+                    if display_url.rstrip("/") != href.rstrip("/"):
+                        mismatches.append({
+                            "display_url": display_url,
+                            "actual_url": href,
+                        })
+    return mismatches
+
+
+def extract_attachments(msg, output_dir=None):
+    """Extract and hash all email attachments."""
+    attachments = []
+    for part in msg.walk():
+        if part.get_content_disposition() == "attachment":
+            filename = part.get_filename() or "unnamed_attachment"
+            content = part.get_payload(decode=True)
+            if content:
+                sha256 = hashlib.sha256(content).hexdigest()
+                md5 = hashlib.md5(content).hexdigest()
+                att_info = {
+                    "filename": filename,
+                    "size": len(content),
+                    "sha256": sha256,
+                    "md5": md5,
+                    "content_type": part.get_content_type(),
+                }
+                if output_dir:
+                    os.makedirs(output_dir, exist_ok=True)
+                    filepath = os.path.join(output_dir, filename)
+                    with open(filepath, "wb") as f:
+                        f.write(content)
+                    att_info["saved_to"] = filepath
+                attachments.append(att_info)
+    return attachments
+
+
+def dns_lookup(domain, 
record_type="TXT"): + """Perform DNS lookup for SPF/DKIM/DMARC records.""" + stdout, _, rc = subprocess.run( + ["dig", record_type, domain, "+short"], + capture_output=True, text=True, timeout=10 + ).stdout, "", 0 + return stdout.strip() if stdout else "" + + +def check_domain_spf(domain): + """Look up the SPF record for a domain.""" + return dns_lookup(domain, "TXT") + + +def check_domain_dmarc(domain): + """Look up the DMARC record for a domain.""" + return dns_lookup(f"_dmarc.{domain}", "TXT") + + +def generate_phishing_indicators(headers, auth, hops, url_mismatches, attachments): + """Compile a list of phishing indicators from the analysis.""" + indicators = [] + mismatch, from_addr, reply_addr = check_from_replyto_mismatch(headers) + if mismatch: + indicators.append(f"From/Reply-To mismatch: {from_addr} vs {reply_addr}") + if auth["spf"] in ("fail", "softfail"): + indicators.append(f"SPF {auth['spf']}") + if auth["dkim"] == "fail" or not auth["has_dkim_signature"]: + indicators.append("DKIM failed or missing") + if auth["dmarc"] in ("fail", "none"): + indicators.append(f"DMARC {auth['dmarc']}") + if url_mismatches: + indicators.append(f"{len(url_mismatches)} URL display/href mismatches detected") + for att in attachments: + if any(att["filename"].endswith(ext) for ext in [".exe", ".scr", ".vbs", ".js", + ".docm", ".xlsm", ".bat", ".ps1", ".hta"]): + indicators.append(f"Suspicious attachment: {att['filename']}") + return indicators + + +if __name__ == "__main__": + print("=" * 60) + print("Email Header Phishing Analysis Agent") + print("SPF/DKIM/DMARC validation, URL analysis, attachment extraction") + print("=" * 60) + + eml_file = sys.argv[1] if len(sys.argv) > 1 else None + + if eml_file and os.path.exists(eml_file): + print(f"\n[*] Analyzing: {eml_file}") + msg, headers = parse_email_file(eml_file) + print(f" From: {headers['from']}") + print(f" To: {headers['to']}") + print(f" Subject: {headers['subject']}") + print(f" Date: {headers['date']}") + + hops = 
extract_received_chain(msg) + print(f"\n[*] Delivery path: {len(hops)} hops") + for hop in hops: + print(f" Hop {hop['hop']}: IPs={hop['ips']}") + + auth = extract_authentication_results(msg) + print(f"\n[*] Authentication: SPF={auth['spf']} DKIM={auth['dkim']} DMARC={auth['dmarc']}") + + urls = extract_urls(msg) + print(f"\n[*] URLs found: {len(urls)}") + url_mismatches = detect_url_mismatch(msg) + for m in url_mismatches: + print(f" [!] MISMATCH: Display='{m['display_url']}' Actual='{m['actual_url']}'") + + attachments = extract_attachments(msg) + print(f"\n[*] Attachments: {len(attachments)}") + for att in attachments: + print(f" {att['filename']} ({att['size']} bytes) SHA256={att['sha256'][:16]}...") + + indicators = generate_phishing_indicators(headers, auth, hops, url_mismatches, attachments) + if indicators: + print(f"\n[!] PHISHING INDICATORS:") + for ind in indicators: + print(f" - {ind}") + else: + print(f"\n[DEMO] Usage: python agent.py ") + print("[*] Provide an EML file for phishing analysis.") diff --git a/personas/_shared/skills/analyzing-ethereum-smart-contract-vulnerabilities/LICENSE b/personas/_shared/skills/analyzing-ethereum-smart-contract-vulnerabilities/LICENSE new file mode 100644 index 0000000..d885118 --- /dev/null +++ b/personas/_shared/skills/analyzing-ethereum-smart-contract-vulnerabilities/LICENSE @@ -0,0 +1,201 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to the Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by the Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding any notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. Please do not remove or change + the license header comment from a contributed file except when + necessary. 
+ + Copyright 2026 mukul975 + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/personas/_shared/skills/analyzing-ethereum-smart-contract-vulnerabilities/SKILL.md b/personas/_shared/skills/analyzing-ethereum-smart-contract-vulnerabilities/SKILL.md new file mode 100644 index 0000000..a94a14a --- /dev/null +++ b/personas/_shared/skills/analyzing-ethereum-smart-contract-vulnerabilities/SKILL.md @@ -0,0 +1,67 @@ +--- +name: analyzing-ethereum-smart-contract-vulnerabilities +description: Perform static and symbolic analysis of Solidity smart contracts using Slither and Mythril to detect reentrancy, + integer overflow, access control, and other vulnerability classes before deployment to Ethereum mainnet. +domain: cybersecurity +subdomain: blockchain-security +tags: +- ethereum +- solidity +- smart-contract +- slither +- mythril +- blockchain +- defi +- audit +version: '1.0' +author: mahipal +license: Apache-2.0 +nist_csf: +- PR.DS-01 +- PR.DS-02 +- ID.RA-01 +--- + +# Analyzing Ethereum Smart Contract Vulnerabilities + +## Overview + +Smart contract vulnerabilities have led to billions of dollars in losses across DeFi protocols. Unlike traditional software, deployed smart contracts are immutable and handle real financial assets, making pre-deployment security analysis critical. 
Slither performs fast static analysis using an intermediate representation to detect over 90 vulnerability patterns in seconds, while Mythril uses symbolic execution and SMT solving to discover complex execution path vulnerabilities like reentrancy and integer overflows. This skill covers running both tools against Solidity contracts, interpreting results, triaging findings by severity, and generating audit reports. + + +## When to Use + +- When investigating security incidents that require analyzing ethereum smart contract vulnerabilities +- When building detection rules or threat hunting queries for this domain +- When SOC analysts need structured procedures for this analysis type +- When validating security monitoring coverage for related attack techniques + +## Prerequisites + +- Python 3.10+ with pip +- Slither (pip install slither-analyzer) and solc compiler +- Mythril (pip install mythril) with solc-select for compiler version management +- Solidity source code or compiled contract bytecode +- Foundry or Hardhat development framework (optional, for project-level analysis) + +## Steps + +### Step 1: Run Slither Static Analysis + +Execute Slither against the contract codebase to identify vulnerability patterns, optimization opportunities, and code quality issues using its 90+ built-in detectors. + +### Step 2: Run Mythril Symbolic Execution + +Run Mythril deep analysis to explore execution paths and discover reentrancy, unchecked external calls, and arithmetic vulnerabilities that require path-sensitive analysis. + +### Step 3: Triage and Correlate Findings + +Combine results from both tools, deduplicate findings, assess severity based on exploitability and financial impact, and filter false positives. + +### Step 4: Generate Audit Report + +Produce a structured audit report with vulnerability descriptions, affected code locations, exploit scenarios, and remediation recommendations. 
+ +## Expected Output + +JSON report listing vulnerabilities with SWC (Smart Contract Weakness Classification) identifiers, severity ratings, affected functions, and suggested fixes. diff --git a/personas/_shared/skills/analyzing-ethereum-smart-contract-vulnerabilities/references/api-reference.md b/personas/_shared/skills/analyzing-ethereum-smart-contract-vulnerabilities/references/api-reference.md new file mode 100644 index 0000000..aa8f97a --- /dev/null +++ b/personas/_shared/skills/analyzing-ethereum-smart-contract-vulnerabilities/references/api-reference.md @@ -0,0 +1,103 @@ +# API Reference: Analyzing Ethereum Smart Contract Vulnerabilities + +## Slither CLI + +```bash +# Basic analysis +slither contracts/ + +# JSON output +slither contracts/ --json slither-report.json + +# Run specific detector only +slither contracts/ --detect reentrancy-eth,unprotected-upgrade + +# List all detectors +slither --list-detectors + +# Print contract summary +slither contracts/ --print human-summary + +# Generate inheritance graph +slither contracts/ --print inheritance-graph +``` + +## Mythril CLI + +```bash +# Analyze single contract +myth analyze contracts/Token.sol + +# JSON output +myth analyze contracts/Token.sol -o json + +# Set execution timeout +myth analyze contracts/Token.sol --execution-timeout 300 + +# Analyze deployed bytecode +myth analyze --address 0x1234... 
--rpc infura + +# Increase analysis depth +myth analyze contracts/Token.sol --max-depth 50 --transaction-count 3 +``` + +## Slither Detector Severity Levels + +| Impact | Confidence | Example Detectors | +|--------|------------|-------------------| +| High | High | reentrancy-eth, suicidal, arbitrary-send-eth | +| High | Medium | controlled-delegatecall, reentrancy-no-eth | +| Medium | High | locked-ether, incorrect-equality | +| Medium | Medium | uninitialized-state, shadowing-state | +| Low | High | naming-convention, solc-version | +| Informational | High | pragma, dead-code | + +## SWC Registry (Key Entries) + +| SWC ID | Title | Tool Coverage | +|--------|-------|---------------| +| SWC-101 | Integer Overflow/Underflow | Mythril | +| SWC-104 | Unchecked Call Return | Slither + Mythril | +| SWC-106 | Unprotected SELFDESTRUCT | Slither + Mythril | +| SWC-107 | Reentrancy | Slither + Mythril | +| SWC-110 | Assert Violation | Mythril | +| SWC-112 | Delegatecall to Untrusted Callee | Slither | +| SWC-115 | tx.origin Authentication | Slither | +| SWC-116 | Block Timestamp Dependence | Mythril | +| SWC-120 | Weak Randomness | Slither | + +## Installation + +```bash +# Slither (requires solc) +pip install slither-analyzer +solc-select install 0.8.20 +solc-select use 0.8.20 + +# Mythril +pip install mythril +``` + +## Slither JSON Output Structure + +```json +{ + "success": true, + "results": { + "detectors": [{ + "check": "reentrancy-eth", + "impact": "High", + "confidence": "Medium", + "description": "Reentrancy in Contract.withdraw()", + "elements": [{"source_mapping": {"filename_short": "Contract.sol", "lines": [42, 43]}}] + }] + } +} +``` + +### References + +- Slither: https://github.com/crytic/slither +- Mythril: https://github.com/Consensys/mythril +- SWC Registry: https://swcregistry.io/ +- Solidity Security: https://docs.soliditylang.org/en/latest/security-considerations.html diff --git 
a/personas/_shared/skills/analyzing-ethereum-smart-contract-vulnerabilities/scripts/agent.py b/personas/_shared/skills/analyzing-ethereum-smart-contract-vulnerabilities/scripts/agent.py new file mode 100644 index 0000000..dbd2508 --- /dev/null +++ b/personas/_shared/skills/analyzing-ethereum-smart-contract-vulnerabilities/scripts/agent.py @@ -0,0 +1,168 @@ +#!/usr/bin/env python3 +"""Smart Contract Security Agent - runs Slither and Mythril analysis on Solidity contracts.""" + +import json +import argparse +import logging +import subprocess +from collections import defaultdict +from datetime import datetime + +logging.basicConfig(level=logging.INFO, format="%(asctime)s [%(levelname)s] %(message)s") +logger = logging.getLogger(__name__) + +SWC_REGISTRY = { + "SWC-101": "Integer Overflow and Underflow", + "SWC-104": "Unchecked Call Return Value", + "SWC-106": "Unprotected SELFDESTRUCT", + "SWC-107": "Reentrancy", + "SWC-110": "Assert Violation", + "SWC-112": "Delegatecall to Untrusted Callee", + "SWC-113": "DoS with Failed Call", + "SWC-115": "Authorization through tx.origin", + "SWC-116": "Block values as a proxy for time", + "SWC-120": "Weak Sources of Randomness", +} + + +def run_slither(contract_path): + """Run Slither static analysis on Solidity contract.""" + cmd = ["slither", contract_path, "--json", "-"] + result = subprocess.run(cmd, capture_output=True, text=True, timeout=120) + try: + return json.loads(result.stdout) if result.stdout else {} + except json.JSONDecodeError: + logger.error("Slither JSON parse failed") + return {} + + +def run_mythril(contract_path, timeout=300): + """Run Mythril symbolic execution analysis.""" + cmd = ["myth", "analyze", contract_path, "--execution-timeout", str(timeout), "-o", "json"] + result = subprocess.run(cmd, capture_output=True, text=True, timeout=timeout + 60) + try: + return json.loads(result.stdout) if result.stdout else {} + except json.JSONDecodeError: + logger.error("Mythril JSON parse failed") + return {} + + 
+def analyze_slither_results(slither_output): + """Parse and categorize Slither detector findings.""" + findings = [] + by_severity = defaultdict(int) + by_detector = defaultdict(int) + for detector in slither_output.get("results", {}).get("detectors", []): + severity = detector.get("impact", "informational").lower() + by_severity[severity] += 1 + det_name = detector.get("check", "unknown") + by_detector[det_name] += 1 + elements = detector.get("elements", []) + location = "" + if elements: + elem = elements[0] + location = f"{elem.get('source_mapping', {}).get('filename_short', '')}:" \ + f"L{elem.get('source_mapping', {}).get('lines', [0])[0] if elem.get('source_mapping', {}).get('lines') else 0}" + findings.append({ + "detector": det_name, + "severity": severity, + "description": detector.get("description", "")[:200], + "location": location, + "confidence": detector.get("confidence", ""), + }) + return { + "total": len(findings), + "by_severity": dict(by_severity), + "by_detector": dict(sorted(by_detector.items(), key=lambda x: x[1], reverse=True)[:15]), + "findings": sorted(findings, key=lambda x: {"high": 0, "medium": 1, "low": 2, "informational": 3}.get(x["severity"], 4)), + } + + +def analyze_mythril_results(mythril_output): + """Parse Mythril symbolic execution findings.""" + findings = [] + by_swc = defaultdict(int) + for issue in mythril_output.get("issues", []): + swc_id = issue.get("swc-id", "") + swc_key = f"SWC-{swc_id}" if swc_id else "unknown" + by_swc[swc_key] += 1 + severity = issue.get("severity", "Medium").lower() + findings.append({ + "swc_id": swc_key, + "swc_title": SWC_REGISTRY.get(swc_key, issue.get("title", "")), + "severity": severity, + "description": issue.get("description", "")[:200], + "contract": issue.get("contract", ""), + "function": issue.get("function", ""), + "line_number": issue.get("lineno", 0), + }) + return { + "total": len(findings), + "by_swc": dict(by_swc), + "findings": findings, + } + + +def 
deduplicate_findings(slither_findings, mythril_findings): + """Merge and deduplicate findings from both tools.""" + combined = [] + seen = set() + for f in slither_findings.get("findings", []): + key = (f.get("location", ""), f.get("detector", "")) + if key not in seen: + seen.add(key) + combined.append({**f, "source": "slither"}) + for f in mythril_findings.get("findings", []): + key = (f.get("contract", "") + str(f.get("line_number", 0)), f.get("swc_id", "")) + if key not in seen: + seen.add(key) + combined.append({**f, "source": "mythril"}) + return combined + + +def generate_report(contract_path, slither_analysis, mythril_analysis, combined): + critical_high = sum(1 for f in combined if f.get("severity") in ("high", "critical")) + return { + "timestamp": datetime.utcnow().isoformat(), + "contract": contract_path, + "slither_analysis": { + "total_findings": slither_analysis["total"], + "by_severity": slither_analysis["by_severity"], + "top_detectors": slither_analysis["by_detector"], + }, + "mythril_analysis": { + "total_findings": mythril_analysis["total"], + "by_swc": mythril_analysis["by_swc"], + }, + "combined_findings": len(combined), + "critical_high_findings": critical_high, + "audit_result": "FAIL" if critical_high > 0 else "PASS", + "findings": combined[:30], + } + + +def main(): + parser = argparse.ArgumentParser(description="Solidity Smart Contract Security Analysis Agent") + parser.add_argument("--contract", required=True, help="Path to Solidity contract or project directory") + parser.add_argument("--mythril-timeout", type=int, default=300, help="Mythril execution timeout (seconds)") + parser.add_argument("--skip-mythril", action="store_true", help="Skip Mythril (slow symbolic execution)") + parser.add_argument("--output", default="smart_contract_audit_report.json") + args = parser.parse_args() + + slither_output = run_slither(args.contract) + slither_analysis = analyze_slither_results(slither_output) + mythril_analysis = {"total": 0, "by_swc": {}, 
"findings": []} + if not args.skip_mythril: + mythril_output = run_mythril(args.contract, args.mythril_timeout) + mythril_analysis = analyze_mythril_results(mythril_output) + combined = deduplicate_findings(slither_analysis, mythril_analysis) + report = generate_report(args.contract, slither_analysis, mythril_analysis, combined) + with open(args.output, "w") as f: + json.dump(report, f, indent=2, default=str) + logger.info("Smart contract audit: %d findings (%d critical/high), result: %s", + report["combined_findings"], report["critical_high_findings"], report["audit_result"]) + print(json.dumps(report, indent=2, default=str)) + + +if __name__ == "__main__": + main() diff --git a/personas/_shared/skills/analyzing-golang-malware-with-ghidra/LICENSE b/personas/_shared/skills/analyzing-golang-malware-with-ghidra/LICENSE new file mode 100644 index 0000000..d885118 --- /dev/null +++ b/personas/_shared/skills/analyzing-golang-malware-with-ghidra/LICENSE @@ -0,0 +1,201 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to the Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by the Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding any notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. Please do not remove or change + the license header comment from a contributed file except when + necessary. 
+ + Copyright 2026 mukul975 + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/personas/_shared/skills/analyzing-golang-malware-with-ghidra/SKILL.md b/personas/_shared/skills/analyzing-golang-malware-with-ghidra/SKILL.md new file mode 100644 index 0000000..99fb989 --- /dev/null +++ b/personas/_shared/skills/analyzing-golang-malware-with-ghidra/SKILL.md @@ -0,0 +1,311 @@ +--- +name: analyzing-golang-malware-with-ghidra +description: Reverse engineer Go-compiled malware using Ghidra with specialized scripts for function recovery, string extraction, + and type reconstruction in stripped Go binaries. +domain: cybersecurity +subdomain: malware-analysis +tags: +- golang +- ghidra +- reverse-engineering +- malware-analysis +- binary-analysis +- go-malware +- disassembly +version: '1.0' +author: mahipal +license: Apache-2.0 +nist_csf: +- DE.AE-02 +- RS.AN-03 +- ID.RA-01 +- DE.CM-01 +--- +# Analyzing Golang Malware with Ghidra + +## Overview + +Go (Golang) has become a popular language for malware authors due to its cross-compilation capabilities, static linking that produces self-contained binaries, and the complexity it introduces for reverse engineering. Go binaries contain the entire runtime, standard library, and all dependencies statically linked, resulting in large binaries (often 5-15MB) with thousands of functions. Ghidra struggles with Go-specific string formats (non-null-terminated), stripped function names, and goroutine concurrency patterns. 
Specialized tools like GoResolver (Volexity, 2025) use control-flow graph similarity to automatically deobfuscate and recover function names in stripped or obfuscated Go binaries. + + +## When to Use + +- When investigating security incidents that require analyzing golang malware with ghidra +- When building detection rules or threat hunting queries for this domain +- When SOC analysts need structured procedures for this analysis type +- When validating security monitoring coverage for related attack techniques + +## Prerequisites + +- Ghidra 11.0+ with JDK 17+ +- GoResolver plugin (for function name recovery) +- Go Reverse Engineering Tool Kit (go-re.tk) +- Python 3.9+ for helper scripts +- Understanding of Go runtime internals (goroutines, channels, interfaces) +- Familiarity with Go binary structure (pclntab, moduledata, itab) + +## Key Concepts + +### Go Binary Structure + +Go binaries embed rich metadata in the `pclntab` (PC Line Table) structure, which maps program counters to function names, source files, and line numbers. Even stripped binaries retain this metadata. The `moduledata` structure contains pointers to type information, itabs (interface tables), and the pclntab itself. Go strings are stored as a pointer-length pair rather than null-terminated C strings. + +### Function Recovery in Stripped Binaries + +Despite stripping symbol tables, Go binaries retain function names within the pclntab. However, obfuscation tools like garble rename functions to random strings. GoResolver addresses this by computing control-flow graph signatures of obfuscated functions and matching them against a database of known Go standard library and third-party package functions. + +### Crate/Dependency Extraction + +Go's dependency management embeds module paths and version strings in the binary. 
Extracting these reveals the malware's third-party dependencies (HTTP libraries, encryption packages, C2 frameworks), which provides insight into capabilities without full reverse engineering.
+
+## Workflow
+
+### Step 1: Initial Binary Analysis
+
+```python
+#!/usr/bin/env python3
+"""Analyze Go binary metadata for malware analysis."""
+import struct
+import sys
+import re
+
+
+def find_go_build_info(data):
+    """Extract Go build information from binary."""
+    # Go buildinfo magic: \xff Go buildinf:
+    magic = b'\xff Go buildinf:'
+    offset = data.find(magic)
+    if offset == -1:
+        return None
+
+    print(f"[+] Go build info at offset 0x{offset:x}")
+
+    # Extract Go version string nearby
+    go_version = re.search(rb'go\d+\.\d+(?:\.\d+)?', data[offset:offset+256])
+    if go_version:
+        print(f"    Go Version: {go_version.group().decode()}")
+
+    return offset
+
+
+def find_pclntab(data):
+    """Locate the pclntab (PC Line Table) structure."""
+    # pclntab magic bytes vary by Go version (little-endian on disk;
+    # see go12/go116/go118/go120 magics in runtime/symtab.go)
+    magics = {
+        b'\xfb\xff\xff\xff\x00\x00': "Go 1.2-1.15",
+        b'\xfa\xff\xff\xff\x00\x00': "Go 1.16-1.17",
+        b'\xf1\xff\xff\xff\x00\x00': "Go 1.20+",
+        b'\xf0\xff\xff\xff\x00\x00': "Go 1.18-1.19",
+    }
+
+    for magic, version in magics.items():
+        offset = data.find(magic)
+        if offset != -1:
+            print(f"[+] pclntab found at 0x{offset:x} ({version})")
+            return offset, version
+
+    return None, None
+
+
+def extract_function_names(data, pclntab_offset):
+    """Extract function names from pclntab."""
+    if pclntab_offset is None:
+        return []
+
+    functions = []
+    # Function name strings follow specific patterns
+    func_pattern = re.compile(
+        rb'(?:main|runtime|fmt|net|os|crypto|encoding|io|sync|'
+        rb'syscall|reflect|strings|bytes|path|time|math|sort|'
+        rb'github\.com|golang\.org)[/\.][\w/.]+',
+    )
+
+    for match in func_pattern.finditer(data):
+        name = match.group().decode('utf-8', errors='replace')
+        if len(name) > 4 and len(name) < 200:
+            functions.append(name)
+
+    return sorted(set(functions))
+
+
+def 
extract_go_strings(data): + """Extract Go-style strings (pointer+length pairs).""" + # Go strings are not null-terminated; extract readable sequences + strings = [] + ascii_pattern = re.compile(rb'[\x20-\x7e]{10,}') + + for match in ascii_pattern.finditer(data): + s = match.group().decode('ascii') + # Filter for interesting malware strings + interesting = [ + 'http', 'https', 'tcp', 'udp', 'dns', + 'cmd', 'shell', 'exec', 'upload', 'download', + 'encrypt', 'decrypt', 'key', 'token', 'password', + 'c2', 'beacon', 'agent', 'implant', 'bot', + 'mutex', 'persist', 'registry', 'scheduled', + ] + if any(kw in s.lower() for kw in interesting): + strings.append(s) + + return strings + + +def extract_dependencies(data): + """Extract Go module dependencies from binary.""" + deps = [] + # Module paths follow pattern: github.com/user/repo + dep_pattern = re.compile( + rb'((?:github\.com|gitlab\.com|golang\.org|gopkg\.in|' + rb'go\.etcd\.io|google\.golang\.org)/[^\x00\s]{5,80})' + ) + + for match in dep_pattern.finditer(data): + dep = match.group().decode('utf-8', errors='replace') + deps.append(dep) + + unique_deps = sorted(set(deps)) + return unique_deps + + +def analyze_go_binary(filepath): + """Full analysis of Go malware binary.""" + with open(filepath, 'rb') as f: + data = f.read() + + print(f"[+] Analyzing Go binary: {filepath}") + print(f" File size: {len(data):,} bytes") + print("=" * 60) + + # Build info + find_go_build_info(data) + + # pclntab + pclntab_offset, go_version = find_pclntab(data) + + # Functions + functions = extract_function_names(data, pclntab_offset) + print(f"\n[+] Recovered {len(functions)} function names") + + # Categorize functions + categories = { + "network": [], "crypto": [], "os_exec": [], + "file_io": [], "main": [], "third_party": [], + } + for f in functions: + if 'net/' in f or 'http' in f.lower(): + categories["network"].append(f) + elif 'crypto' in f: + categories["crypto"].append(f) + elif 'os/exec' in f or 'syscall' in f: + 
categories["os_exec"].append(f)
+        elif 'os.' in f or 'io/' in f:
+            categories["file_io"].append(f)
+        elif f.startswith('main.'):
+            categories["main"].append(f)
+        elif 'github.com' in f or 'golang.org' in f:
+            categories["third_party"].append(f)
+
+    for cat, funcs in categories.items():
+        if funcs:
+            print(f"\n  [{cat}] ({len(funcs)} functions):")
+            for fn in funcs[:10]:
+                print(f"    {fn}")
+
+    # Dependencies
+    deps = extract_dependencies(data)
+    print(f"\n[+] Dependencies ({len(deps)}):")
+    for dep in deps[:20]:
+        print(f"    {dep}")
+
+    # Suspicious strings
+    sus_strings = extract_go_strings(data)
+    print(f"\n[+] Suspicious strings ({len(sus_strings)}):")
+    for s in sus_strings[:20]:
+        print(f"    {s}")
+
+
+if __name__ == "__main__":
+    if len(sys.argv) < 2:
+        print(f"Usage: {sys.argv[0]} ")
+        sys.exit(1)
+    analyze_go_binary(sys.argv[1])
+```
+
+### Step 2: Ghidra Analysis Script
+
+```python
+# Ghidra script (run within Ghidra's script manager)
+# Save as AnalyzeGoBinary.py in Ghidra scripts directory
+
+# @category MalwareAnalysis
+# @description Analyze Go binary structure and recover metadata
+
+def analyze_go_binary_ghidra():
+    """Ghidra script for Go binary analysis."""
+    from ghidra.program.model.mem import MemoryAccessException
+
+    program = getCurrentProgram()
+    memory = program.getMemory()
+    listing = program.getListing()
+
+    print("[+] Go Binary Analysis Script")
+    print(f"    Program: {program.getName()}")
+
+    # Find pclntab
+    pclntab_magics = [
+        bytes([0xf0, 0xff, 0xff, 0xff]),  # Go 1.18-1.19
+        bytes([0xf1, 0xff, 0xff, 0xff]),  # Go 1.20+
+        bytes([0xfa, 0xff, 0xff, 0xff]),  # Go 1.16-1.17
+        bytes([0xfb, 0xff, 0xff, 0xff]),  # Go 1.2-1.15
+    ]
+
+    for magic in pclntab_magics:
+        addr = memory.findBytes(
+            program.getMinAddress(), magic, None, True, None
+        )
+        if addr:
+            print(f"[+] pclntab found at {addr}")
+            # Create label
+            program.getSymbolTable().createLabel(
+                addr, "go_pclntab", None,
+                ghidra.program.model.symbol.SourceType.ANALYSIS
+            )
+            break
+
+    # Fix Go 
string definitions + # Go strings are ptr+len, not null terminated + print("[+] Fixing Go string references...") + + # Search for function names containing package paths + symbol_table = program.getSymbolTable() + func_count = 0 + for symbol in symbol_table.getAllSymbols(True): + name = symbol.getName() + if ('.' in name and + any(pkg in name for pkg in + ['main.', 'runtime.', 'net.', 'crypto.', 'os.'])): + func_count += 1 + + print(f"[+] Found {func_count} Go function symbols") + + +# Execute +analyze_go_binary_ghidra() +``` + +## Validation Criteria + +- Go version and build information extracted from binary +- pclntab located and parsed for function name recovery +- Third-party dependencies identified revealing malware capabilities +- Main package functions enumerated for targeted analysis +- Network, crypto, and OS exec functions categorized +- Ghidra analysis correctly labels Go runtime structures + +## References + +- [CUJO AI - Reverse Engineering Go Binaries with Ghidra](https://cujo.com/blog/reverse-engineering-go-binaries-with-ghidra/) +- [Volexity GoResolver](https://www.volexity.com/blog/2025/04/01/goresolver-using-control-flow-graph-similarity-to-deobfuscate-golang-binaries-automatically/) +- [Go Reverse Engineering Tool Kit](https://go-re.tk/about/) +- [SentinelOne AlphaGolang](https://www.sentinelone.com/labs/alphagolang-a-step-by-step-go-malware-reversing-methodology-for-ida-pro/) +- [Go Binary Reversing Notes](https://gist.github.com/0xdevalias/4e430914124c3fd2c51cb7ac2801acba) diff --git a/personas/_shared/skills/analyzing-golang-malware-with-ghidra/assets/template.md b/personas/_shared/skills/analyzing-golang-malware-with-ghidra/assets/template.md new file mode 100644 index 0000000..8b2f0ce --- /dev/null +++ b/personas/_shared/skills/analyzing-golang-malware-with-ghidra/assets/template.md @@ -0,0 +1,35 @@ +# Go Malware Analysis Report + +## Sample Information +| Field | Value | +|-------|-------| +| SHA-256 | | +| File Size | | +| Go Version | | 
+| Architecture | amd64 / arm64 / 386 | +| Stripped | Yes / No | +| Obfuscated | Yes (garble) / No | + +## Recovered Functions +| Category | Count | Key Functions | +|----------|-------|---------------| +| main | | | +| networking | | | +| crypto | | | +| os/exec | | | +| third-party | | | + +## Dependencies +| Module | Purpose | +|--------|---------| +| | | + +## C2 Infrastructure +| Indicator | Type | Value | +|-----------|------|-------| +| | URL / IP / Domain | | + +## Recommendations +1. Block identified C2 infrastructure +2. Create YARA rule for unique Go function signatures +3. Monitor for similar Go binary compilation artifacts diff --git a/personas/_shared/skills/analyzing-golang-malware-with-ghidra/references/api-reference.md b/personas/_shared/skills/analyzing-golang-malware-with-ghidra/references/api-reference.md new file mode 100644 index 0000000..5777b76 --- /dev/null +++ b/personas/_shared/skills/analyzing-golang-malware-with-ghidra/references/api-reference.md @@ -0,0 +1,90 @@ +# API Reference: Go Malware Analysis with Ghidra + +## Ghidra Go Analysis Setup + +### GoResolver Script (Volexity) +```bash +# Install GoResolver for stripped Go binary function recovery +git clone https://github.com/volexity/GoResolver +# Run against Ghidra project +analyzeHeadless /ghidra_projects MyProject -process go_malware.exe \ + -postScript GoResolver.java +``` + +### Ghidra Built-in Go Support (10.3+) +``` +File > Import > Select Go binary +Analysis > Auto Analyze (includes GolangAnalyzer) +Window > Function Tags > Filter "go." 
+``` + +## Go Binary Characteristics + +### Build Info Magic +``` +Offset in .go.buildinfo section: "\xff Go buildinf:" +``` + +### gopclntab Magic Bytes +| Go Version | Magic | +|------------|-------| +| 1.2-1.15 | `FB FF FF FF 00 00` | +| 1.16-1.17 | `FA FF FF FF 00 00` | +| 1.18-1.19 | `F0 FF FF FF 00 00` | +| 1.20+ | `F1 FF FF FF 00 00` | + +### String Format +Go strings are length-prefixed (not null-terminated): +``` +struct GoString { + char *ptr; // pointer to string data + int64 length; // string length +}; +``` + +## Go-Specific Ghidra Scripts + +### GoReSym (Mandiant) +```bash +GoReSym -t -d -p /path/to/binary +# -t: Recover type information +# -d: Dump function metadata +# -p: Print package listing +``` + +### redress (Go Reverse Engineering) +```bash +redress -src binary.exe # Reconstruct source tree +redress -pkg binary.exe # List packages +redress -type binary.exe # Type information +redress -string binary.exe # Go string extraction +redress -interface binary.exe # Interface types +``` + +## Go Obfuscation Tools + +| Tool | Technique | Detection | +|------|-----------|-----------| +| garble | Function name hashing, literal obfuscation | Hash-like symbols, missing debug info | +| gobfuscate | Package/function renaming | Random 8-char names | +| go-strip | Symbol table removal | Missing gopclntab entries | + +## Common Go Malware Families + +| Family | Type | Notable Packages | +|--------|------|-----------------| +| Sliver | C2 implant | protobuf, grpc, mtls | +| Merlin | C2 agent | http2, jose, websocket | +| Sunlogin/Cobalt | RAT | screenshot, clipboard, keylog | +| BianLian | Ransomware | crypto/aes, filepath.Walk | +| Royal | Ransomware | goroutine-based parallel encryption | + +## Key Ghidra Analysis Steps +``` +1. Search > For Strings > "go1." (version identification) +2. Search > For Bytes > FB FF FF FF (gopclntab) +3. Symbol Table > Filter "main." (entry points) +4. Navigation > Go To "runtime.main" (program start) +5. 
Decompiler > Check goroutine spawns (runtime.newproc)
+6. Data Types > Apply GoString struct to string references
+```
diff --git a/personas/_shared/skills/analyzing-golang-malware-with-ghidra/references/standards.md b/personas/_shared/skills/analyzing-golang-malware-with-ghidra/references/standards.md
new file mode 100644
index 0000000..2614646
--- /dev/null
+++ b/personas/_shared/skills/analyzing-golang-malware-with-ghidra/references/standards.md
@@ -0,0 +1,29 @@
+# Go Binary Analysis Standards
+
+## Go Binary Structure
+| Component | Description | Location |
+|-----------|-------------|----------|
+| pclntab | PC-to-function mapping table | .gopclntab or .text |
+| moduledata | Runtime metadata structure | .noptrdata |
+| itab | Interface method tables | .rodata |
+| buildinfo | Go version and module info | .go.buildinfo |
+| typelinks | Type descriptor table | .rodata |
+
+## pclntab Magic Bytes by Go Version
+| Magic | Go Version |
+|-------|-----------|
+| 0xFBFFFFFF | 1.2 - 1.15 |
+| 0xFAFFFFFF | 1.16 - 1.17 |
+| 0xF0FFFFFF | 1.18 - 1.19 |
+| 0xF1FFFFFF | 1.20+ |
+
+## Common Go Malware Families
+- Sliver C2 implant
+- Geacon (Go Cobalt Strike beacon)
+- GoBruteforcer
+- Kaiji botnet
+- Chaos botnet (Go-based)
+
+## References
+- [Go Runtime Source](https://github.com/golang/go/tree/master/src/runtime)
+- [Go Internal ABI](https://go.dev/s/regcallabi)
diff --git a/personas/_shared/skills/analyzing-golang-malware-with-ghidra/references/workflows.md b/personas/_shared/skills/analyzing-golang-malware-with-ghidra/references/workflows.md
new file mode 100644
index 0000000..da39f40
--- /dev/null
+++ b/personas/_shared/skills/analyzing-golang-malware-with-ghidra/references/workflows.md
@@ -0,0 +1,37 @@
+# Go Malware Analysis Workflows
+
+## Workflow 1: Stripped Binary Recovery
+```
+[Stripped Go Binary] --> [Find pclntab] --> [Recover Function Names]
+                                                      |
+                                                      v
+                                [Apply GoResolver] --> [Deobfuscate Names]
+                                                      |
+                                                      v
+                                            [Categorize Functions]
+```
+
+## Workflow 2: Full 
Ghidra Analysis +``` +[Go Binary] --> [Import to Ghidra] --> [Run Go Analysis Scripts] + | + v + [Fix String References] + | + v + [Identify main Package] + | + v + [Analyze C2/Network Logic] +``` + +## Workflow 3: Dependency-Based Capability Assessment +``` +[Go Binary] --> [Extract Module Info] --> [List Dependencies] + | + v + [Map to Capabilities] + | + v + [Prioritize Analysis] +``` diff --git a/personas/_shared/skills/analyzing-golang-malware-with-ghidra/scripts/agent.py b/personas/_shared/skills/analyzing-golang-malware-with-ghidra/scripts/agent.py new file mode 100644 index 0000000..5eda1cb --- /dev/null +++ b/personas/_shared/skills/analyzing-golang-malware-with-ghidra/scripts/agent.py @@ -0,0 +1,267 @@ +#!/usr/bin/env python3 +"""Go malware analysis agent for Ghidra-assisted reverse engineering. + +Analyzes Go binaries to extract function names, strings, build metadata, +package information, and detects common Go malware characteristics. +""" + +import os +import sys +import json +import hashlib +import re +import math +from collections import Counter + + +def compute_hash(filepath): + """Compute SHA-256 hash of file.""" + sha256 = hashlib.sha256() + with open(filepath, "rb") as f: + for chunk in iter(lambda: f.read(65536), b""): + sha256.update(chunk) + return sha256.hexdigest() + + +def shannon_entropy(data): + """Calculate Shannon entropy.""" + if not data: + return 0.0 + freq = Counter(data) + length = len(data) + return -sum((c / length) * math.log2(c / length) for c in freq.values()) + + +def detect_go_binary(filepath): + """Detect if a binary is compiled with Go and extract version info.""" + with open(filepath, "rb") as f: + data = f.read() + + indicators = { + "is_go_binary": False, + "go_version": None, + "go_buildinfo": False, + "gopclntab_found": False, + } + + # Go build info magic + buildinfo_magic = b"\xff Go buildinf:" + offset = data.find(buildinfo_magic) + if offset != -1: + indicators["is_go_binary"] = True + indicators["go_buildinfo"] 
= True + + # Go version string + version_pattern = rb"go(\d+\.\d+(?:\.\d+)?)" + matches = re.findall(version_pattern, data) + if matches: + indicators["is_go_binary"] = True + versions = sorted(set(m.decode() for m in matches)) + indicators["go_version"] = versions[-1] if versions else None + + # gopclntab (Go PC line table) magic bytes + gopclntab_magics = [ + b"\xfb\xff\xff\xff\x00\x00", # Go 1.2-1.15 + b"\xfa\xff\xff\xff\x00\x00", # Go 1.16-1.17 + b"\xf0\xff\xff\xff\x00\x00", # Go 1.18+ + b"\xf1\xff\xff\xff\x00\x00", # Go 1.20+ + ] + for magic in gopclntab_magics: + if magic in data: + indicators["gopclntab_found"] = True + indicators["is_go_binary"] = True + break + + # Runtime strings + go_strings = [b"runtime.main", b"runtime.goexit", b"runtime.gopanic", + b"runtime.newproc", b"GOROOT", b"GOPATH"] + found_runtime = sum(1 for s in go_strings if s in data) + if found_runtime >= 2: + indicators["is_go_binary"] = True + indicators["runtime_strings_found"] = found_runtime + + return indicators + + +def extract_go_strings(filepath, min_length=6): + """Extract Go-style strings (length-prefixed, not null-terminated).""" + with open(filepath, "rb") as f: + data = f.read() + + # Standard ASCII string extraction + ascii_pattern = re.compile(rb"[\x20-\x7e]{%d,}" % min_length) + strings = [m.group().decode("ascii", errors="replace") for m in ascii_pattern.finditer(data)] + return strings + + +def extract_go_packages(strings_list): + """Identify Go packages from extracted strings.""" + packages = set() + pkg_pattern = re.compile(r"^([a-zA-Z0-9_]+(?:/[a-zA-Z0-9_.-]+)+)\.") + for s in strings_list: + match = pkg_pattern.match(s) + if match: + packages.add(match.group(1)) + # Also look for known Go import paths + for s in strings_list: + if s.startswith("github.com/") or s.startswith("golang.org/"): + parts = s.split("/") + if len(parts) >= 3: + packages.add("/".join(parts[:3])) + return sorted(packages) + + +SUSPICIOUS_GO_PACKAGES = { + "github.com/kbinani/screenshot": 
"Screen capture capability", + "github.com/atotto/clipboard": "Clipboard access", + "github.com/go-vgo/robotgo": "Desktop automation / keylogging", + "github.com/miekg/dns": "Custom DNS resolution (C2/tunneling)", + "golang.org/x/crypto/ssh": "SSH client (lateral movement)", + "github.com/shirou/gopsutil": "System enumeration", + "github.com/mitchellh/go-ps": "Process listing", + "github.com/gobuffalo/packr": "Binary resource embedding", + "github.com/Ne0nd0g/merlin": "Merlin C2 agent", + "github.com/BishopFox/sliver": "Sliver C2 framework", + "github.com/traefik/yaegi": "Go interpreter (dynamic execution)", +} + + +def detect_suspicious_packages(packages): + """Flag suspicious Go packages commonly used in malware.""" + findings = [] + for pkg in packages: + for sus_pkg, description in SUSPICIOUS_GO_PACKAGES.items(): + if sus_pkg in pkg: + findings.append({"package": pkg, "concern": description}) + return findings + + +def analyze_sections(filepath): + """Analyze PE/ELF sections for Go binary characteristics.""" + with open(filepath, "rb") as f: + magic = f.read(4) + f.seek(0) + data = f.read() + + sections = [] + if magic[:2] == b"MZ": # PE + try: + import pefile + pe = pefile.PE(data=data) + for section in pe.sections: + name = section.Name.rstrip(b"\x00").decode("ascii", errors="replace") + entropy = section.get_entropy() + sections.append({ + "name": name, "virtual_size": section.Misc_VirtualSize, + "raw_size": section.SizeOfRawData, "entropy": round(entropy, 3), + }) + pe.close() + except ImportError: + sections.append({"note": "pefile not installed"}) + elif magic[:4] == b"\x7fELF": + try: + from elftools.elf.elffile import ELFFile + from io import BytesIO + elf = ELFFile(BytesIO(data)) + for section in elf.iter_sections(): + sec_data = section.data() if section.header.sh_size > 0 else b"" + entropy = shannon_entropy(sec_data) if sec_data else 0 + sections.append({ + "name": section.name, "size": section.header.sh_size, + "entropy": round(entropy, 3), "type": 
section.header.sh_type, + }) + except ImportError: + sections.append({"note": "pyelftools not installed"}) + return sections + + +def detect_obfuscation(go_info, strings_list): + """Detect Go binary obfuscation (garble, gobfuscate).""" + indicators = {"obfuscated": False, "techniques": []} + + # Garble replaces function names with hashes + hash_names = sum(1 for s in strings_list if re.match(r"^[a-f0-9]{16,}$", s)) + if hash_names > 20: + indicators["obfuscated"] = True + indicators["techniques"].append("Possible garble obfuscation (hash-like function names)") + + # Missing gopclntab suggests stripping + if not go_info.get("gopclntab_found"): + indicators["techniques"].append("gopclntab not found - may be stripped or modified") + + # Low runtime string count + if go_info.get("runtime_strings_found", 0) < 2: + indicators["obfuscated"] = True + indicators["techniques"].append("Low Go runtime string count - possible obfuscation") + + return indicators + + +def generate_report(filepath): + """Generate comprehensive Go malware analysis report.""" + report = { + "file": filepath, + "sha256": compute_hash(filepath), + "size": os.path.getsize(filepath), + } + + go_info = detect_go_binary(filepath) + report["go_detection"] = go_info + + if not go_info["is_go_binary"]: + report["conclusion"] = "Not identified as a Go binary" + return report + + strings_list = extract_go_strings(filepath) + report["total_strings"] = len(strings_list) + + packages = extract_go_packages(strings_list) + report["packages"] = packages[:50] + + suspicious = detect_suspicious_packages(packages) + report["suspicious_packages"] = suspicious + + sections = analyze_sections(filepath) + report["sections"] = sections + + obfuscation = detect_obfuscation(go_info, strings_list) + report["obfuscation"] = obfuscation + + return report + + +if __name__ == "__main__": + print("=" * 60) + print("Go Malware Analysis Agent (Ghidra-assisted)") + print("Go binary detection, package extraction, obfuscation 
detection") + print("=" * 60) + + target = sys.argv[1] if len(sys.argv) > 1 else None + + if not target or not os.path.exists(target): + print("\n[DEMO] Usage: python agent.py ") + sys.exit(0) + + report = generate_report(target) + go = report.get("go_detection", {}) + print(f"\n[*] File: {target}") + print(f"[*] SHA-256: {report['sha256']}") + print(f"[*] Go binary: {go.get('is_go_binary', False)}") + print(f"[*] Go version: {go.get('go_version', 'unknown')}") + print(f"[*] Strings: {report.get('total_strings', 0)}") + + print("\n--- Packages ---") + for pkg in report.get("packages", [])[:15]: + print(f" {pkg}") + + print("\n--- Suspicious Packages ---") + for s in report.get("suspicious_packages", []): + print(f" [!] {s['package']}: {s['concern']}") + + print("\n--- Obfuscation ---") + obf = report.get("obfuscation", {}) + print(f" Obfuscated: {obf.get('obfuscated', False)}") + for t in obf.get("techniques", []): + print(f" {t}") + + print(f"\n{json.dumps(report, indent=2, default=str)}") diff --git a/personas/_shared/skills/analyzing-golang-malware-with-ghidra/scripts/process.py b/personas/_shared/skills/analyzing-golang-malware-with-ghidra/scripts/process.py new file mode 100644 index 0000000..1951c7d --- /dev/null +++ b/personas/_shared/skills/analyzing-golang-malware-with-ghidra/scripts/process.py @@ -0,0 +1,162 @@ +#!/usr/bin/env python3 +""" +Go Malware Binary Analyzer + +Extracts metadata, function names, dependencies, and suspicious +indicators from Go-compiled malware binaries. 
+
+Usage:
+    python process.py --file malware.exe --output report.json
+"""
+
+import argparse
+import json
+import re
+import struct
+import sys
+from pathlib import Path
+
+
+PCLNTAB_MAGICS = {
+    b'\xf0\xff\xff\xff': "Go 1.18-1.19",
+    b'\xf1\xff\xff\xff': "Go 1.20+",
+    b'\xfa\xff\xff\xff': "Go 1.16-1.17",
+    b'\xfb\xff\xff\xff': "Go 1.2-1.15",
+}
+
+
+def find_pclntab(data):
+    for magic, version in PCLNTAB_MAGICS.items():
+        offset = data.find(magic)
+        if offset != -1:
+            return offset, version
+    return None, None
+
+
+def extract_go_version(data):
+    match = re.search(rb'go(\d+\.\d+(?:\.\d+)?)', data)
+    return match.group(1).decode() if match else "unknown"
+
+
+def extract_functions(data):
+    func_pattern = re.compile(
+        rb'((?:main|runtime|fmt|net|os|crypto|encoding|io|sync|'
+        rb'syscall|reflect|strings|bytes|path|time|math|sort|'
+        rb'github\.com|golang\.org|gopkg\.in)[/\.][\w/.]+)'
+    )
+    functions = set()
+    for match in func_pattern.finditer(data):
+        name = match.group(1).decode('utf-8', errors='replace')
+        if 4 < len(name) < 200:
+            functions.add(name)
+    return sorted(functions)
+
+
+def extract_dependencies(data):
+    dep_pattern = re.compile(
+        rb'((?:github\.com|gitlab\.com|golang\.org|gopkg\.in|'
+        rb'go\.etcd\.io|google\.golang\.org)/[\w./-]{5,80})'
+    )
+    deps = set()
+    for match in dep_pattern.finditer(data):
+        dep = match.group(1).decode('utf-8', errors='replace')
+        # Clean up trailing artifacts
+        dep = dep.rstrip('/.')
+        deps.add(dep)
+    return sorted(deps)
+
+
+def extract_suspicious_strings(data):
+    interesting_patterns = [
+        rb'https?://[\w./:?&=-]+',
+        rb'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}(?::\d+)?',
+        rb'(?:cmd|powershell|bash|sh)(?:\.exe)?',
+        rb'(?:HKLM|HKCU)\\[^\x00]+',
+        rb'/etc/(?:passwd|shadow|crontab)',
+    ]
+
+    results = {}
+    for pattern in interesting_patterns:
+        matches = re.findall(pattern, data)
+        if matches:
+            decoded = [m.decode('utf-8', errors='replace') for m in matches]
+            results[pattern.decode('utf-8', errors='replace')] = 
list(set(decoded)) + + return results + + +def categorize_functions(functions): + categories = { + "main_logic": [], + "networking": [], + "cryptography": [], + "os_execution": [], + "file_operations": [], + "third_party": [], + "runtime": [], + } + + for func in functions: + fl = func.lower() + if func.startswith('main.'): + categories["main_logic"].append(func) + elif any(x in fl for x in ['net/', 'http', 'tcp', 'udp', 'dns']): + categories["networking"].append(func) + elif 'crypto' in fl: + categories["cryptography"].append(func) + elif any(x in fl for x in ['os/exec', 'syscall']): + categories["os_execution"].append(func) + elif any(x in fl for x in ['os.', 'io/', 'ioutil']): + categories["file_operations"].append(func) + elif any(x in fl for x in ['github.com', 'golang.org', 'gopkg.in']): + categories["third_party"].append(func) + elif func.startswith('runtime.'): + categories["runtime"].append(func) + + return {k: v for k, v in categories.items() if v} + + +def analyze(filepath): + with open(filepath, 'rb') as f: + data = f.read() + + report = { + "file": str(filepath), + "size": len(data), + "go_version": extract_go_version(data), + } + + pclntab_offset, pclntab_version = find_pclntab(data) + report["pclntab"] = { + "offset": f"0x{pclntab_offset:x}" if pclntab_offset else None, + "version": pclntab_version, + } + + functions = extract_functions(data) + report["total_functions"] = len(functions) + report["function_categories"] = categorize_functions(functions) + + report["dependencies"] = extract_dependencies(data) + report["suspicious_strings"] = extract_suspicious_strings(data) + + return report + + +def main(): + parser = argparse.ArgumentParser(description="Go Malware Analyzer") + parser.add_argument("--file", required=True, help="Go binary to analyze") + parser.add_argument("--output", help="Output JSON report") + + args = parser.parse_args() + report = analyze(args.file) + + print(json.dumps(report, indent=2)) + + if args.output: + with 
open(args.output, 'w') as f: + json.dump(report, f, indent=2) + print(f"\n[+] Report saved to {args.output}") + + +if __name__ == "__main__": + main() diff --git a/personas/_shared/skills/analyzing-heap-spray-exploitation/LICENSE b/personas/_shared/skills/analyzing-heap-spray-exploitation/LICENSE new file mode 100644 index 0000000..d885118 --- /dev/null +++ b/personas/_shared/skills/analyzing-heap-spray-exploitation/LICENSE @@ -0,0 +1,201 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to the Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by the Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding any notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. Please do not remove or change + the license header comment from a contributed file except when + necessary. 
+ + Copyright 2026 mukul975 + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/personas/_shared/skills/analyzing-heap-spray-exploitation/SKILL.md b/personas/_shared/skills/analyzing-heap-spray-exploitation/SKILL.md new file mode 100644 index 0000000..5cc7823 --- /dev/null +++ b/personas/_shared/skills/analyzing-heap-spray-exploitation/SKILL.md @@ -0,0 +1,59 @@ +--- +name: analyzing-heap-spray-exploitation +description: Detect and analyze heap spray attacks in memory dumps using Volatility3 plugins to identify NOP sled patterns, + shellcode landing zones, and suspicious large allocations in process virtual address space. +domain: cybersecurity +subdomain: malware-analysis +tags: +- malware-analysis +- memory-forensics +- heap-spray +- volatility3 +- exploit-analysis +version: '1.0' +author: mahipal +license: Apache-2.0 +nist_csf: +- DE.AE-02 +- RS.AN-03 +- ID.RA-01 +- DE.CM-01 +--- +# Analyzing Heap Spray Exploitation + +## Overview + +Heap spraying is an exploitation technique that fills large regions of a process's heap with attacker-controlled data (typically NOP sleds followed by shellcode) to increase the reliability of code execution exploits. This skill covers detecting heap spray artifacts in memory dumps using Volatility3's malfind, vadinfo, and memmap plugins, identifying suspicious contiguous memory allocations, scanning for NOP sled patterns (0x90, 0x0c0c0c0c), and extracting embedded shellcode for analysis. 
+ + +## When to Use + +- When investigating security incidents that require analyzing heap spray exploitation +- When building detection rules or threat hunting queries for this domain +- When SOC analysts need structured procedures for this analysis type +- When validating security monitoring coverage for related attack techniques + +## Prerequisites + +- Python 3.9+ with `volatility3` framework installed +- Memory dump file (.raw, .vmem, .dmp format) +- Understanding of virtual memory layout and VAD (Virtual Address Descriptor) trees +- Familiarity with common shellcode patterns and NOP sled encodings + +## Steps + +### Step 1: Identify Suspicious Processes +Use Volatility3 windows.malfind to scan for processes with executable injected memory regions. + +### Step 2: Analyze VAD Entries +Examine VAD tree entries using windows.vadinfo for large contiguous allocations with RWX permissions. + +### Step 3: Scan for NOP Sled Patterns +Search suspicious memory regions for NOP sled signatures (0x90 sequences, 0x0c0c0c0c patterns). + +### Step 4: Extract and Analyze Shellcode +Dump suspicious memory regions and identify shellcode using byte pattern analysis. + +## Expected Output + +JSON report with suspicious processes, heap spray indicators, NOP sled locations, memory region sizes, and extracted shellcode hashes. 
diff --git a/personas/_shared/skills/analyzing-heap-spray-exploitation/references/api-reference.md b/personas/_shared/skills/analyzing-heap-spray-exploitation/references/api-reference.md new file mode 100644 index 0000000..230dec1 --- /dev/null +++ b/personas/_shared/skills/analyzing-heap-spray-exploitation/references/api-reference.md @@ -0,0 +1,55 @@ +# API Reference: Analyzing Heap Spray Exploitation + +## Volatility3 Plugins for Heap Spray Analysis + +| Plugin | Command | Purpose | +|--------|---------|---------| +| malfind | `vol -f dump.raw windows.malfind` | Find injected executable memory regions | +| vadinfo | `vol -f dump.raw windows.vadinfo` | Virtual Address Descriptor details | +| memmap | `vol -f dump.raw windows.memmap --pid PID --dump` | Dump process memory to files | +| pslist | `vol -f dump.raw windows.pslist` | List running processes | +| handles | `vol -f dump.raw windows.handles --pid PID` | List process handles | + +## Common Heap Spray NOP Sled Patterns + +| Pattern | Hex | Description | +|---------|-----|-------------| +| x86 NOP | 0x90909090 | Classic NOP instruction | +| 0x0C landing | 0x0C0C0C0C | Common heap spray address target | +| 0x0D landing | 0x0D0D0D0D | Alternative spray address | +| 0x0A landing | 0x0A0A0A0A | Alternative spray address | +| 0x41 fill | 0x41414141 | "AAAA" padding fill | + +## Shellcode Signatures + +| Bytes | Mnemonic | Context | +|-------|----------|---------| +| FC E8 | CLD; CALL | Common shellcode prologue | +| 60 E8 | PUSHAD; CALL | Register-saving shellcode start | +| 31 C0 50 68 | XOR EAX; PUSH; PUSH | Stack setup for API call | +| E8 FF FF FF FF | CALL $+5 | Self-locating shellcode (GetPC) | + +## Detection Thresholds + +| Indicator | Threshold | Meaning | +|-----------|-----------|---------| +| Large allocation | >= 1 MB per region | Suspicious heap allocation | +| Total spray size | >= 50 MB per process | Strong heap spray indicator | +| NOP sled count | >= 20 repeated bytes | NOP sled detected | +| RWX 
permissions | PAGE_EXECUTE_READWRITE | Injected executable code | + +## Install Volatility3 + +```bash +pip install volatility3 +# Or from source: +git clone https://github.com/volatilityfoundation/volatility3.git +cd volatility3 && pip install -e . +``` + +## References + +- Volatility3 GitHub: https://github.com/volatilityfoundation/volatility3 +- Volatility3 malfind: https://volatility3.readthedocs.io/en/latest/ +- Heap Spray Techniques: https://www.corelan.be/index.php/2011/12/31/exploit-writing-tutorial-part-11-heap-spraying-demystified/ +- DFRWS 2025 Workshop: https://webdiis.unizar.es/~ricardo/dfrws-eu-25-workshop/ diff --git a/personas/_shared/skills/analyzing-heap-spray-exploitation/scripts/agent.py b/personas/_shared/skills/analyzing-heap-spray-exploitation/scripts/agent.py new file mode 100644 index 0000000..d67b295 --- /dev/null +++ b/personas/_shared/skills/analyzing-heap-spray-exploitation/scripts/agent.py @@ -0,0 +1,219 @@ +#!/usr/bin/env python3 +"""Agent for analyzing heap spray exploitation in memory dumps. + +Detects heap spray artifacts using Volatility3 by scanning for +NOP sled patterns, large contiguous allocations, and injected +executable regions in process virtual address space. 
+""" +# For authorized forensic analysis only + +import argparse +import hashlib +import json +import os +import re +import subprocess +from collections import defaultdict +from datetime import datetime +from pathlib import Path + +NOP_PATTERNS = { + "x86_nop": b"\x90" * 16, + "heap_spray_0c": b"\x0c" * 16, + "heap_spray_0d": b"\x0d" * 16, + "heap_spray_0a": b"\x0a" * 16, + "heap_spray_04": b"\x04" * 16, + "heap_spray_41": b"\x41" * 16, +} + +SHELLCODE_MARKERS = [ + b"\xfc\xe8", # CLD; CALL + b"\x60\xe8", # PUSHAD; CALL + b"\xeb\x10\x5a", # JMP SHORT; POP EDX + b"\x31\xc0\x50\x68", # XOR EAX; PUSH; PUSH + b"\xe8\xff\xff\xff\xff", # CALL $+5 (self-locating) +] + +SUSPICIOUS_ALLOC_THRESHOLD = 0x100000 # 1 MB + + +class HeapSprayAnalyzer: + """Detects heap spray exploitation artifacts in memory dumps.""" + + def __init__(self, memory_dump, output_dir="./heap_spray_analysis"): + self.memory_dump = memory_dump + self.output_dir = Path(output_dir) + self.output_dir.mkdir(parents=True, exist_ok=True) + self.findings = [] + + def _run_vol3(self, plugin, extra_args=None): + """Run a Volatility3 plugin and return stdout.""" + cmd = ["vol", "-f", self.memory_dump, plugin] + if extra_args: + cmd.extend(extra_args) + try: + result = subprocess.run(cmd, capture_output=True, text=True, timeout=300) + return result.stdout + except (FileNotFoundError, subprocess.TimeoutExpired): + return "" + + def run_malfind(self): + """Run windows.malfind to detect injected executable memory.""" + output = self._run_vol3("windows.malfind") + entries = [] + current = {} + for line in output.splitlines(): + parts = line.split() + if len(parts) >= 6 and parts[0].isdigit(): + if current: + entries.append(current) + current = { + "pid": int(parts[0]), + "process": parts[1], + "start_addr": parts[2], + "end_addr": parts[3], + "protection": parts[5] if len(parts) > 5 else "", + } + elif current and line.strip().startswith("0x"): + hex_match = re.findall(r"[0-9a-fA-F]{2}", line.split(" ")[0] if " " in 
line else line) + if "hex_bytes" not in current: + current["hex_bytes"] = "" + current["hex_bytes"] += "".join(hex_match) + if current: + entries.append(current) + return entries + + def run_vadinfo(self): + """Run windows.vadinfo to find large suspicious allocations.""" + output = self._run_vol3("windows.vadinfo") + large_allocs = [] + for line in output.splitlines(): + parts = line.split() + if len(parts) >= 5 and parts[0].isdigit(): + try: + pid = int(parts[0]) + start = int(parts[2], 16) if parts[2].startswith("0x") else 0 + end = int(parts[3], 16) if parts[3].startswith("0x") else 0 + size = end - start + if size >= SUSPICIOUS_ALLOC_THRESHOLD: + large_allocs.append({ + "pid": pid, "process": parts[1], + "start": hex(start), "end": hex(end), + "size_bytes": size, "size_mb": round(size / (1024 * 1024), 2), + }) + except (ValueError, IndexError): + continue + return large_allocs + + def scan_dump_for_patterns(self, dump_path): + """Scan a memory dump file for NOP sled and shellcode patterns.""" + matches = {"nop_sleds": [], "shellcode_markers": []} + try: + with open(dump_path, "rb") as f: + data = f.read() + except (FileNotFoundError, PermissionError): + return matches + + for name, pattern in NOP_PATTERNS.items(): + offset = 0 + count = 0 + while True: + idx = data.find(pattern, offset) + if idx == -1: + break + count += 1 + offset = idx + len(pattern) + if count > 100: + break + if count > 0: + matches["nop_sleds"].append({"pattern": name, "occurrences": count}) + + for marker in SHELLCODE_MARKERS: + idx = data.find(marker) + if idx != -1: + context = data[idx:idx + 64] + matches["shellcode_markers"].append({ + "offset": hex(idx), + "bytes": context.hex()[:128], + "sha256": hashlib.sha256(context).hexdigest(), + }) + return matches + + def dump_process_memory(self, pid): + """Dump a process's memory using Volatility3 memmap.""" + dump_dir = self.output_dir / f"pid_{pid}" + dump_dir.mkdir(exist_ok=True) + self._run_vol3("windows.memmap", ["--pid", str(pid), 
"--dump", + "--output-dir", str(dump_dir)]) + dumps = list(dump_dir.glob("*.dmp")) + return [str(d) for d in dumps] + + def analyze(self): + """Run full heap spray analysis pipeline.""" + malfind_results = self.run_malfind() + large_allocs = self.run_vadinfo() + + spray_candidates = defaultdict(list) + for alloc in large_allocs: + spray_candidates[alloc["pid"]].append(alloc) + + for pid, allocs in spray_candidates.items(): + total_mb = sum(a["size_mb"] for a in allocs) + if total_mb > 50: + self.findings.append({ + "severity": "high", "type": "Heap Spray Indicator", + "detail": f"PID {pid}: {total_mb:.1f} MB in {len(allocs)} large allocations", + }) + + for entry in malfind_results: + hex_bytes = entry.get("hex_bytes", "") + if hex_bytes.count("90") > 20 or hex_bytes.count("0c") > 20: + self.findings.append({ + "severity": "critical", "type": "NOP Sled in Injected Region", + "detail": f"PID {entry['pid']} ({entry['process']}): " + f"NOP sled at {entry['start_addr']}", + }) + + return { + "malfind_entries": malfind_results, + "large_allocations": large_allocs, + "spray_candidate_pids": list(spray_candidates.keys()), + } + + def generate_report(self): + analysis = self.analyze() + + report = { + "report_date": datetime.utcnow().isoformat(), + "memory_dump": self.memory_dump, + "malfind_count": len(analysis["malfind_entries"]), + "large_allocation_count": len(analysis["large_allocations"]), + **analysis, + "findings": self.findings, + "total_findings": len(self.findings), + } + out = self.output_dir / "heap_spray_report.json" + with open(out, "w") as f: + json.dump(report, f, indent=2, default=str) + print(json.dumps(report, indent=2, default=str)) + return report + + +def main(): + parser = argparse.ArgumentParser( + description="Analyze memory dumps for heap spray exploitation artifacts" + ) + parser.add_argument("memory_dump", help="Path to memory dump file (.raw, .vmem, .dmp)") + parser.add_argument("--output-dir", default="./heap_spray_analysis", + help="Output 
directory for report and dumps") + parser.add_argument("--alloc-threshold", type=int, default=0x100000, + help="Minimum allocation size in bytes to flag (default: 1MB)") + args = parser.parse_args() + + os.makedirs(args.output_dir, exist_ok=True) + analyzer = HeapSprayAnalyzer(args.memory_dump, output_dir=args.output_dir) + analyzer.generate_report() + + +if __name__ == "__main__": + main() diff --git a/personas/_shared/skills/analyzing-indicators-of-compromise/LICENSE b/personas/_shared/skills/analyzing-indicators-of-compromise/LICENSE new file mode 100644 index 0000000..d885118 --- /dev/null +++ b/personas/_shared/skills/analyzing-indicators-of-compromise/LICENSE @@ -0,0 +1,201 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to the Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by the Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding any notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. Please do not remove or change + the license header comment from a contributed file except when + necessary. 
+ + Copyright 2026 mukul975 + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/personas/_shared/skills/analyzing-indicators-of-compromise/SKILL.md b/personas/_shared/skills/analyzing-indicators-of-compromise/SKILL.md new file mode 100644 index 0000000..3539430 --- /dev/null +++ b/personas/_shared/skills/analyzing-indicators-of-compromise/SKILL.md @@ -0,0 +1,163 @@ +--- +name: analyzing-indicators-of-compromise +description: 'Analyzes indicators of compromise (IOCs) including IP addresses, domains, file hashes, URLs, and email artifacts + to determine maliciousness confidence, campaign attribution, and blocking priority. Use when triaging IOCs from phishing + emails, security alerts, or external threat feeds; enriching raw IOCs with multi-source intelligence; or making block/monitor/whitelist + decisions. Activates for requests involving VirusTotal, AbuseIPDB, MalwareBazaar, MISP, or IOC enrichment pipelines. 
+ + ' +domain: cybersecurity +subdomain: threat-intelligence +tags: +- IOC +- VirusTotal +- AbuseIPDB +- MalwareBazaar +- MISP +- threat-intelligence +- STIX +- NIST-CSF +version: 1.0.0 +author: mahipal +license: Apache-2.0 +atlas_techniques: +- AML.T0052 +nist_csf: +- ID.RA-01 +- ID.RA-05 +- DE.CM-01 +- DE.AE-02 +--- +# Analyzing Indicators of Compromise + +## When to Use + +Use this skill when: +- A phishing email or alert generates IOCs (URLs, IP addresses, file hashes) requiring rapid triage +- Automated feeds deliver bulk IOCs that need confidence scoring before ingestion into blocking controls +- An incident investigation requires contextual enrichment of observed network artifacts + +**Do not use** this skill in isolation for high-stakes blocking decisions — always combine automated enrichment with analyst judgment, especially for shared infrastructure (CDNs, cloud providers). + +## Prerequisites + +- VirusTotal API key (free or Enterprise) for multi-AV and sandbox lookup +- AbuseIPDB API key for IP reputation checks +- MISP instance or TIP for cross-referencing against known campaigns +- Python with `requests` and `vt-py` libraries, or SOAR platform with pre-built connectors + +## Workflow + +### Step 1: Normalize and Classify IOC Types + +Before enriching, classify each IOC: +- **IPv4/IPv6 address**: Check if RFC 1918 private (skip external enrichment), validate format +- **Domain/FQDN**: Defang for safe handling (`evil[.]com`), extract registered domain via tldextract +- **URL**: Extract domain + path separately; check for redirectors +- **File hash**: Identify hash type (MD5/SHA-1/SHA-256); prefer SHA-256 for uniqueness +- **Email address**: Split into domain (check MX/DMARC) and local part for pattern analysis + +Defang IOCs in documentation (replace `.` with `[.]` and `://` with `[://]`) to prevent accidental clicks. 
+ +### Step 2: Multi-Source Enrichment + +**VirusTotal (file hash, URL, IP, domain)**: +```python +import vt + +client = vt.Client("YOUR_VT_API_KEY") + +# File hash lookup +file_obj = client.get_object(f"/files/{sha256_hash}") +detections = file_obj.last_analysis_stats +print(f"Malicious: {detections['malicious']}/{sum(detections.values())}") + +# Domain analysis +domain_obj = client.get_object(f"/domains/{domain}") +print(domain_obj.last_analysis_stats) +print(domain_obj.reputation) +client.close() +``` + +**AbuseIPDB (IP addresses)**: +```python +import requests + +response = requests.get( + "https://api.abuseipdb.com/api/v2/check", + headers={"Key": "YOUR_KEY", "Accept": "application/json"}, + params={"ipAddress": "1.2.3.4", "maxAgeInDays": 90} +) +data = response.json()["data"] +print(f"Confidence: {data['abuseConfidenceScore']}%, Reports: {data['totalReports']}") +``` + +**MalwareBazaar (file hashes)**: +```python +response = requests.post( + "https://mb-api.abuse.ch/api/v1/", + data={"query": "get_info", "hash": sha256_hash} +) +result = response.json() +if result["query_status"] == "ok": + print(result["data"][0]["tags"], result["data"][0]["signature"]) +``` + +### Step 3: Contextualize with Campaign Attribution + +Query MISP for existing events matching the IOC: +```python +from pymisp import PyMISP + +misp = PyMISP("https://misp.example.com", "API_KEY") +results = misp.search(value="evil-domain.com", type_attribute="domain") +for event in results: + print(event["Event"]["info"], event["Event"]["threat_level_id"]) +``` + +Check Shodan for IP context (hosting provider, open ports, banners) to identify if the IP belongs to bulletproof hosting or a legitimate cloud provider (false positive risk). 
+ +### Step 4: Assign Confidence Score and Disposition + +Apply a tiered decision framework: +- **Block (High Confidence ≥ 70%)**: ≥15 AV detections on VT, AbuseIPDB score ≥70, matches known malware family or campaign +- **Monitor/Alert (Medium 40–69%)**: 5–14 AV detections, moderate AbuseIPDB score, no campaign attribution +- **Whitelist/Investigate (Low <40%)**: ≤4 AV detections, no abuse reports, legitimate service (Google, Cloudflare CDN IPs) +- **False Positive**: Legitimate business service incorrectly flagged; document and exclude from future alerts + +### Step 5: Document and Distribute + +Record findings in TIP/MISP with: +- All enrichment data collected (timestamps, source, score) +- Disposition decision and rationale +- Blocking actions taken (firewall, proxy, DNS sinkhole) +- Related incident ticket number + +Export to STIX indicator object with confidence field set appropriately. + +## Key Concepts + +| Term | Definition | +|------|-----------| +| **IOC** | Indicator of Compromise — observable network or host artifact indicating potential compromise | +| **Enrichment** | Process of adding contextual data to a raw IOC from multiple intelligence sources | +| **Defanging** | Modifying IOCs (replacing `.` with `[.]`) to prevent accidental activation in documentation | +| **False Positive Rate** | Percentage of benign artifacts incorrectly flagged as malicious; critical for tuning block thresholds | +| **Sinkhole** | DNS server redirecting malicious domain lookups to a benign IP for detection without blocking traffic entirely | +| **TTL** | Time-to-live for an IOC in blocking controls; IP indicators should expire after 30 days, domains after 90 days | + +## Tools & Systems + +- **VirusTotal**: Multi-engine malware scanner and threat intelligence platform with 70+ AV engines, sandbox reports, and community comments +- **AbuseIPDB**: Community-maintained IP reputation database with 90-day abuse report history +- **MalwareBazaar (abuse.ch)**: Free malware hash 
repository with YARA rule associations and malware family tagging +- **URLScan.io**: Free URL analysis service that captures screenshots, DOM, and network requests for phishing URL triage +- **Shodan**: Internet-wide scan data providing hosting provider, open ports, and banner information for IP enrichment + +## Common Pitfalls + +- **Blocking shared infrastructure**: CDN IPs (Cloudflare 104.21.x.x, AWS CloudFront) may legitimately host malicious content but blocking the IP disrupts thousands of legitimate sites. +- **VT score obsession**: Low VT detection count does not mean benign — zero-day malware and custom APT tools often score 0 initially. Check sandbox behavior, MISP, and passive DNS. +- **Missing defanging**: Pasting live IOCs in emails or Confluence docs can trigger automated URL scanners or phishing tools. +- **No expiration policy**: IOCs without TTLs accumulate in blocklists indefinitely, generating false positives as infrastructure is repurposed by legitimate users. +- **Over-relying on single source**: VirusTotal aggregates AV opinions — all may be wrong or lag behind emerging malware. Use 3+ independent sources for high-stakes decisions. 
diff --git a/personas/_shared/skills/analyzing-indicators-of-compromise/references/api-reference.md b/personas/_shared/skills/analyzing-indicators-of-compromise/references/api-reference.md
new file mode 100644
index 0000000..f460a9c
--- /dev/null
+++ b/personas/_shared/skills/analyzing-indicators-of-compromise/references/api-reference.md
@@ -0,0 +1,120 @@
+# API Reference: IOC Enrichment Tools
+
+## VirusTotal API v3
+
+### File Hash Lookup
+```bash
+curl -H "x-apikey: $VT_KEY" \
+  "https://www.virustotal.com/api/v3/files/$SHA256"
+```
+
+### Domain Lookup
+```bash
+curl -H "x-apikey: $VT_KEY" \
+  "https://www.virustotal.com/api/v3/domains/$DOMAIN"
+```
+
+### IP Lookup
+```bash
+curl -H "x-apikey: $VT_KEY" \
+  "https://www.virustotal.com/api/v3/ip_addresses/$IP"
+```
+
+### Key Response Fields
+| Field | Description |
+|-------|-------------|
+| `last_analysis_stats.malicious` | Number of AV engines detecting as malicious |
+| `last_analysis_stats.undetected` | AV engines finding clean |
+| `reputation` | Community reputation score |
+| `popular_threat_classification` | Threat label consensus |
+
+### Python (vt-py)
+```python
+import vt
+client = vt.Client("API_KEY")
+file_obj = client.get_object(f"/files/{sha256}")
+stats = file_obj.last_analysis_stats
+client.close()
+```
+
+## AbuseIPDB API v2
+
+### Check IP
+```bash
+curl -G "https://api.abuseipdb.com/api/v2/check" \
+  -H "Key: $ABUSE_KEY" -H "Accept: application/json" \
+  -d "ipAddress=1.2.3.4" -d "maxAgeInDays=90"
+```
+
+### Response Fields
+| Field | Description |
+|-------|-------------|
+| `abuseConfidenceScore` | 0-100 abuse confidence |
+| `totalReports` | Report count in timeframe |
+| `countryCode` | Source country |
+| `isp` | Internet service provider |
+| `isTor` | Tor exit node flag |
+
+## MalwareBazaar API (abuse.ch)
+
+### Hash Lookup
+```bash
+curl -X POST "https://mb-api.abuse.ch/api/v1/" \
+  -d "query=get_info" -d "hash=$SHA256"
+```
+
+### Response Fields
+| Field | Description |
+|-------|-------------|
+| 
#!/usr/bin/env python3
"""IOC analysis and enrichment agent.

Classifies indicators of compromise (IOCs), defangs/refangs them for safe
handling, and enriches them against VirusTotal, AbuseIPDB, and MalwareBazaar.
Network lookups are optional: without API keys (or the ``requests`` package)
the agent still classifies, defangs, and scores locally.
"""

import re
import os
import json
import datetime
import ipaddress

try:
    import requests
    HAS_REQUESTS = True
except ImportError:
    # Enrichment degrades gracefully when requests is unavailable.
    HAS_REQUESTS = False


def classify_ioc(value):
    """Classify an IOC by type: ipv4, domain, url, sha256, sha1, md5, email.

    Hash patterns are tested before the URL/domain patterns so pure hex
    strings are never mistaken for hostnames. Returns "unknown" when no
    pattern matches.
    """
    value = value.strip()
    if re.match(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$", value):
        # Fix: a dotted quad is only IPv4 if every octet is <= 255
        # (previously "999.1.1.1" was classified as ipv4).
        if all(int(octet) <= 255 for octet in value.split(".")):
            return "ipv4"
        return "unknown"
    if re.match(r"^[a-fA-F0-9]{64}$", value):
        return "sha256"
    if re.match(r"^[a-fA-F0-9]{40}$", value):
        return "sha1"
    if re.match(r"^[a-fA-F0-9]{32}$", value):
        return "md5"
    if re.match(r"^https?://", value):
        return "url"
    if re.match(r"^[^@]+@[^@]+\.[^@]+$", value):
        return "email"
    if re.match(r"^[a-zA-Z0-9][a-zA-Z0-9.-]+\.[a-zA-Z]{2,}$", value):
        return "domain"
    return "unknown"


def defang_ioc(value):
    """Defang an IOC for safe documentation (http -> hxxp, "." -> "[.]")."""
    value = value.replace("http://", "hxxp://")
    value = value.replace("https://", "hxxps://")
    # Only dots followed by a word character are bracketed, so trailing
    # sentence punctuation is left untouched.
    value = re.sub(r"\.(?=\w)", "[.]", value)
    return value


def refang_ioc(value):
    """Refang a defanged IOC back to its live form for querying APIs."""
    value = value.replace("hxxp://", "http://")
    value = value.replace("hxxps://", "https://")
    value = value.replace("[.]", ".")
    value = value.replace("[://]", "://")
    return value


def is_private_ip(ip):
    """Return True for non-routable IPv4 addresses (RFC 1918, loopback, ...).

    Uses the stdlib ipaddress module instead of hand-rolled octet checks,
    which also covers link-local and other reserved ranges. Malformed input
    returns False instead of raising (the old octet parser raised ValueError).
    """
    try:
        return ipaddress.ip_address(ip).is_private
    except ValueError:
        return False


def query_virustotal_hash(sha256, api_key):
    """Query VirusTotal v3 for a file hash; return a summary dict or None.

    Returns None on any non-200 response (not found, quota, auth failure).
    """
    url = f"https://www.virustotal.com/api/v3/files/{sha256}"
    resp = requests.get(url, headers={"x-apikey": api_key}, timeout=30)
    if resp.status_code == 200:
        data = resp.json().get("data", {}).get("attributes", {})
        stats = data.get("last_analysis_stats", {})
        return {
            "sha256": sha256,
            "malicious": stats.get("malicious", 0),
            "total": sum(stats.values()),
            "type_description": data.get("type_description", ""),
            "popular_threat_name": data.get("popular_threat_classification", {}).get(
                "suggested_threat_label", ""),
            "tags": data.get("tags", []),
        }
    return None


def query_virustotal_domain(domain, api_key):
    """Query VirusTotal v3 for domain reputation; return dict or None."""
    url = f"https://www.virustotal.com/api/v3/domains/{domain}"
    resp = requests.get(url, headers={"x-apikey": api_key}, timeout=30)
    if resp.status_code == 200:
        data = resp.json().get("data", {}).get("attributes", {})
        stats = data.get("last_analysis_stats", {})
        return {
            "domain": domain,
            "malicious": stats.get("malicious", 0),
            "suspicious": stats.get("suspicious", 0),
            "reputation": data.get("reputation", 0),
            "registrar": data.get("registrar", ""),
            "creation_date": data.get("creation_date", ""),
        }
    return None


def query_virustotal_ip(ip, api_key):
    """Query VirusTotal v3 for IP address reputation; return dict or None.

    Added so IPv4 IOCs hit the correct /ip_addresses/ endpoint instead of
    the /domains/ endpoint (bug in the original enrich_ioc routing).
    """
    url = f"https://www.virustotal.com/api/v3/ip_addresses/{ip}"
    resp = requests.get(url, headers={"x-apikey": api_key}, timeout=30)
    if resp.status_code == 200:
        data = resp.json().get("data", {}).get("attributes", {})
        stats = data.get("last_analysis_stats", {})
        return {
            "ip": ip,
            "malicious": stats.get("malicious", 0),
            "suspicious": stats.get("suspicious", 0),
            "reputation": data.get("reputation", 0),
            "as_owner": data.get("as_owner", ""),
            "country": data.get("country", ""),
        }
    return None


def query_abuseipdb(ip, api_key, max_age_days=90):
    """Query AbuseIPDB v2 for IP reputation; return dict or None."""
    url = "https://api.abuseipdb.com/api/v2/check"
    resp = requests.get(url, headers={"Key": api_key, "Accept": "application/json"},
                        params={"ipAddress": ip, "maxAgeInDays": max_age_days}, timeout=30)
    if resp.status_code == 200:
        data = resp.json().get("data", {})
        return {
            "ip": ip,
            "abuse_confidence": data.get("abuseConfidenceScore", 0),
            "total_reports": data.get("totalReports", 0),
            "country": data.get("countryCode", ""),
            "isp": data.get("isp", ""),
            "domain": data.get("domain", ""),
            "is_tor": data.get("isTor", False),
        }
    return None


def query_malwarebazaar(sha256):
    """Query MalwareBazaar (abuse.ch) for hash info; return dict or None.

    No API key required; returns None when the hash is unknown.
    """
    url = "https://mb-api.abuse.ch/api/v1/"
    resp = requests.post(url, data={"query": "get_info", "hash": sha256}, timeout=30)
    if resp.status_code == 200:
        result = resp.json()
        if result.get("query_status") == "ok" and result.get("data"):
            entry = result["data"][0]
            return {
                "sha256": sha256,
                "signature": entry.get("signature", ""),
                "tags": entry.get("tags", []),
                "file_type": entry.get("file_type", ""),
                "reporter": entry.get("reporter", ""),
                "first_seen": entry.get("first_seen", ""),
            }
    return None


def score_ioc(vt_result=None, abuse_result=None, mb_result=None):
    """Assign a confidence score and disposition to an IOC.

    Scoring: VT detections contribute up to 40, AbuseIPDB confidence up to
    30, and any MalwareBazaar match a flat 30. Thresholds match the skill's
    documented framework: >=70 BLOCK, 40-69 MONITOR, <40 INVESTIGATE.
    All arguments are optional; None sources contribute nothing.
    """
    score = 0
    reasons = []
    if vt_result:
        malicious = vt_result.get("malicious", 0)
        if malicious >= 15:
            score += 40
            reasons.append(f"VT: {malicious} detections (high)")
        elif malicious >= 5:
            score += 20
            reasons.append(f"VT: {malicious} detections (moderate)")
        elif malicious > 0:
            score += 5
            reasons.append(f"VT: {malicious} detections (low)")
    if abuse_result:
        abuse_score = abuse_result.get("abuse_confidence", 0)
        if abuse_score >= 70:
            score += 30
            reasons.append(f"AbuseIPDB: {abuse_score}% confidence")
        elif abuse_score >= 30:
            score += 15
            reasons.append(f"AbuseIPDB: {abuse_score}% confidence")
    if mb_result:
        score += 30
        reasons.append(f"MalwareBazaar: {mb_result.get('signature', 'known malware')}")

    if score >= 70:
        disposition = "BLOCK"
    elif score >= 40:
        disposition = "MONITOR"
    else:
        disposition = "INVESTIGATE"

    return {"score": score, "disposition": disposition, "reasons": reasons}


def enrich_ioc(value, vt_key=None, abuse_key=None):
    """Enrich a single IOC with multi-source intelligence.

    Accepts live or defanged input (defanged input is refanged first).
    Private IPs are never sent to external services. Without the requests
    library the result carries an "error" and only local classification.
    """
    # Generalization: accept defanged IOCs ("evil[.]com") as input.
    value = refang_ioc(value.strip())
    ioc_type = classify_ioc(value)
    result = {
        "ioc": value,
        "type": ioc_type,
        "defanged": defang_ioc(value),
        "enrichment": {},
        # Timezone-aware replacement for deprecated utcnow(); keep the
        # original "...Z" suffix format.
        "timestamp": datetime.datetime.now(datetime.timezone.utc)
        .isoformat().replace("+00:00", "Z"),
    }
    # The private-IP short-circuit needs no network, so it runs before the
    # requests-availability check.
    if ioc_type == "ipv4" and is_private_ip(value):
        result["note"] = "RFC 1918 private IP - skipping external enrichment"
        return result
    if not HAS_REQUESTS:
        result["error"] = "requests library not installed"
        return result
    if ioc_type in ("sha256", "sha1", "md5") and vt_key:
        result["enrichment"]["virustotal"] = query_virustotal_hash(value, vt_key)
        result["enrichment"]["malwarebazaar"] = query_malwarebazaar(value)
    elif ioc_type == "ipv4":
        if abuse_key:
            result["enrichment"]["abuseipdb"] = query_abuseipdb(value, abuse_key)
        if vt_key:
            # Bug fix: was query_virustotal_domain(value, vt_key), which hit
            # the /domains/ endpoint with an IP address.
            result["enrichment"]["virustotal"] = query_virustotal_ip(value, vt_key)
    elif ioc_type == "domain" and vt_key:
        result["enrichment"]["virustotal"] = query_virustotal_domain(value, vt_key)

    scoring = score_ioc(
        result["enrichment"].get("virustotal"),
        result["enrichment"].get("abuseipdb"),
        result["enrichment"].get("malwarebazaar"),
    )
    result["score"] = scoring["score"]
    result["disposition"] = scoring["disposition"]
    result["reasons"] = scoring["reasons"]
    return result


if __name__ == "__main__":
    print("=" * 60)
    print("IOC Analysis & Enrichment Agent")
    print("VirusTotal, AbuseIPDB, MalwareBazaar integration")
    print("=" * 60)

    demo_iocs = [
        "185.220.101.42",
        "evil-domain.com",
        "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
        "http://malicious-site.com/payload.exe",
        "192.168.1.100",
    ]

    print("\n--- IOC Classification & Defanging ---")
    for ioc in demo_iocs:
        ioc_type = classify_ioc(ioc)
        defanged = defang_ioc(ioc)
        private = " (private)" if ioc_type == "ipv4" and is_private_ip(ioc) else ""
        print(f"  {ioc_type:8s} | {defanged}{private}")

    vt_key = os.environ.get("VT_API_KEY")
    abuse_key = os.environ.get("ABUSEIPDB_API_KEY")

    if vt_key or abuse_key:
        print("\n--- Enrichment (live API queries) ---")
        for ioc in demo_iocs:
            result = enrich_ioc(ioc, vt_key, abuse_key)
            print(f"\n  {result['ioc']} ({result['type']})")
            print(f"  Disposition: {result.get('disposition', 'N/A')} "
                  f"(score: {result.get('score', 0)})")
            for reason in result.get("reasons", []):
                print(f"    - {reason}")
    else:
        print("\n[*] Set VT_API_KEY and/or ABUSEIPDB_API_KEY environment variables for live enrichment.")
Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to the Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by the Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding any notices that do not + pertain to any part of the Derivative Works, in at least one + of the 
following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. Please do not remove or change + the license header comment from a contributed file except when + necessary. + + Copyright 2026 mukul975 + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/personas/_shared/skills/analyzing-ios-app-security-with-objection/SKILL.md b/personas/_shared/skills/analyzing-ios-app-security-with-objection/SKILL.md new file mode 100644 index 0000000..3dea359 --- /dev/null +++ b/personas/_shared/skills/analyzing-ios-app-security-with-objection/SKILL.md @@ -0,0 +1,204 @@ +--- +name: analyzing-ios-app-security-with-objection +description: 'Performs runtime mobile security exploration of iOS applications using Objection, a Frida-powered toolkit that + enables security testers to interact with app internals without jailbreaking. Use when assessing iOS app security posture, + bypassing client-side protections, dumping keychain items, inspecting filesystem storage, and evaluating runtime behavior. + Activates for requests involving iOS security testing, Objection runtime analysis, Frida-based iOS assessment, or mobile + runtime exploration. 
+ + ' +domain: cybersecurity +subdomain: mobile-security +author: mahipal +tags: +- mobile-security +- ios +- objection +- frida +- owasp-mobile +- penetration-testing +version: 1.0.0 +license: Apache-2.0 +atlas_techniques: +- AML.T0054 +nist_ai_rmf: +- MEASURE-2.7 +- MANAGE-2.4 +- GOVERN-6.2 +- MAP-5.1 +nist_csf: +- PR.PS-01 +- PR.AA-05 +- ID.RA-01 +- DE.CM-09 +--- +# Analyzing iOS App Security with Objection + +## When to Use + +Use this skill when: +- Performing runtime security assessment of iOS applications during authorized penetration tests +- Inspecting iOS keychain, filesystem, and memory for sensitive data exposure +- Bypassing client-side security controls (SSL pinning, jailbreak detection) during security testing +- Evaluating iOS app behavior at runtime without access to source code + +**Do not use** this skill on production devices without explicit authorization -- Objection modifies app runtime behavior and may trigger security monitoring. + +## Prerequisites + +- Python 3.10+ with pip +- Objection installed: `pip install objection` +- Frida installed: `pip install frida-tools` +- Target iOS device (jailbroken with Frida server, or non-jailbroken with repackaged IPA) +- For non-jailbroken: `objection patchipa` to inject Frida gadget into IPA +- macOS recommended for iOS testing (Xcode, ideviceinstaller) +- USB connection to target device or network Frida server + +## Workflow + +### Step 1: Prepare the Testing Environment + +**For jailbroken devices:** +```bash +# Install Frida server on device via Cydia/Sileo +# SSH to device and start Frida server +ssh root@ "/usr/sbin/frida-server -D" + +# Verify Frida connectivity +frida-ps -U # List processes on USB-connected device +``` + +**For non-jailbroken devices (authorized testing):** +```bash +# Patch IPA with Frida gadget +objection patchipa --source target.ipa --codesign-signature "Apple Development: test@example.com" + +# Install patched IPA +ideviceinstaller -i target-patched.ipa +``` + +### Step 2: 
Attach Objection to Target App + +```bash +# Attach to running app by bundle ID +objection --gadget "com.target.app" explore + +# Or spawn the app fresh +objection --gadget "com.target.app" explore --startup-command "ios hooking list classes" +``` + +Once attached, Objection provides an interactive REPL for runtime exploration. + +### Step 3: Assess Data Storage Security (MASVS-STORAGE) + +```bash +# Dump iOS Keychain items accessible to the app +ios keychain dump + +# List files in app sandbox +ios plist cat Info.plist +env # Show app environment paths + +# Inspect NSUserDefaults for sensitive data +ios nsuserdefaults get + +# List SQLite databases +sqlite connect app_data.db +sqlite execute query "SELECT * FROM credentials" + +# Check for sensitive data in pasteboard +ios pasteboard monitor +``` + +### Step 4: Evaluate Network Security (MASVS-NETWORK) + +```bash +# Disable SSL/TLS certificate pinning +ios sslpinning disable + +# Verify pinning is bypassed by observing traffic in Burp Suite proxy +# Monitor network-related class method calls +ios hooking watch class NSURLSession +ios hooking watch class NSURLConnection +``` + +### Step 5: Inspect Authentication and Authorization (MASVS-AUTH) + +```bash +# List all Objective-C classes +ios hooking list classes + +# Search for authentication-related classes +ios hooking search classes Auth +ios hooking search classes Login +ios hooking search classes Token + +# Hook authentication methods to observe parameters +ios hooking watch method "+[AuthManager validateToken:]" --dump-args --dump-return + +# Monitor biometric authentication calls +ios hooking watch class LAContext +``` + +### Step 6: Assess Binary Protections (MASVS-RESILIENCE) + +```bash +# Check jailbreak detection implementation +ios jailbreak disable + +# Simulate jailbreak detection bypass +ios jailbreak simulate + +# List loaded frameworks and libraries +memory list modules + +# Search memory for sensitive strings +memory search "password" --string 
+memory search "api_key" --string +memory search "Bearer" --string + +# Dump specific memory regions +memory dump all dump_output/ +``` + +### Step 7: Review Platform Interaction (MASVS-PLATFORM) + +```bash +# List URL schemes registered by the app +ios info binary +ios bundles list_frameworks + +# Hook URL scheme handlers +ios hooking watch method "-[AppDelegate application:openURL:options:]" --dump-args + +# Monitor clipboard access +ios pasteboard monitor + +# Check for custom keyboard restrictions +ios hooking search classes UITextField +``` + +## Key Concepts + +| Term | Definition | +|------|-----------| +| **Objection** | Runtime mobile exploration toolkit built on Frida that provides pre-built scripts for common security testing tasks | +| **Frida Gadget** | Shared library injected into app process to enable Frida instrumentation without jailbreak | +| **Keychain** | iOS secure credential storage system; Objection can dump items accessible to the target app's keychain access group | +| **SSL Pinning Bypass** | Runtime modification of certificate validation logic to allow proxy interception of HTTPS traffic | +| **Method Hooking** | Intercepting Objective-C/Swift method calls at runtime to observe arguments, return values, and modify behavior | + +## Tools & Systems + +- **Objection**: High-level Frida-powered mobile security exploration toolkit with pre-built commands +- **Frida**: Dynamic instrumentation framework providing JavaScript injection into native app processes +- **Frida-tools**: CLI utilities for Frida including frida-ps, frida-trace, and frida-discover +- **ideviceinstaller**: Cross-platform tool for installing/managing iOS apps via USB +- **Burp Suite**: HTTP proxy for intercepting traffic after SSL pinning bypass + +## Common Pitfalls + +- **App crashes on attach**: Some apps implement Frida detection. Use `--startup-command` to hook anti-Frida checks early in the app lifecycle. 
+- **Keychain access scope**: Objection can only dump keychain items within the app's access group. System keychain items require separate jailbreak-level tools. +- **Swift name mangling**: Swift method names are mangled in the runtime. Use `ios hooking list classes` with grep to find demangled names. +- **Non-persistent changes**: All Objection modifications are runtime-only and reset on app restart. Document findings immediately. diff --git a/personas/_shared/skills/analyzing-ios-app-security-with-objection/assets/template.md b/personas/_shared/skills/analyzing-ios-app-security-with-objection/assets/template.md new file mode 100644 index 0000000..5dcfbe7 --- /dev/null +++ b/personas/_shared/skills/analyzing-ios-app-security-with-objection/assets/template.md @@ -0,0 +1,80 @@ +# iOS Objection Security Assessment Report + +## Engagement Information + +| Field | Value | +|-------|-------| +| Application | [APP_NAME] | +| Bundle ID | [BUNDLE_ID] | +| iOS Version | [IOS_VERSION] | +| Device | [DEVICE_MODEL] | +| Device State | [Jailbroken/Non-Jailbroken] | +| Assessment Date | [DATE] | +| Analyst | [ANALYST] | +| Objection Version | [VERSION] | + +## Executive Summary + +[Brief narrative of findings from Objection runtime analysis] + +## Keychain Analysis + +| Service | Account | Data Type | Protection Class | Risk | +|---------|---------|-----------|-----------------|------| +| [SERVICE] | [ACCOUNT] | [TYPE] | [CLASS] | [RISK] | + +**Findings**: [Description of sensitive data found in keychain] + +## Data Storage Assessment + +### NSUserDefaults +| Key | Contains Sensitive Data | Risk | +|-----|----------------------|------| +| [KEY] | [YES/NO] | [RISK] | + +### SQLite Databases +| Database | Encrypted | Sensitive Tables | Risk | +|----------|-----------|-----------------|------| +| [DB_NAME] | [YES/NO] | [TABLES] | [RISK] | + +### Filesystem +| Path | Contents | Protection | Risk | +|------|----------|-----------|------| +| [PATH] | [DESCRIPTION] | [ATTRIBUTE] | 
[RISK] | + +## Network Security + +| Check | Result | Details | +|-------|--------|---------| +| SSL Pinning Present | [YES/NO] | [IMPLEMENTATION_DETAILS] | +| SSL Pinning Bypass | [SUCCESS/FAIL] | [METHOD_USED] | +| ATS Configuration | [STRICT/RELAXED] | [EXCEPTIONS] | + +## Binary Protection Assessment + +| Protection | Status | Details | +|-----------|--------|---------| +| Jailbreak Detection | [Present/Absent] | [BYPASS_DIFFICULTY] | +| Frida Detection | [Present/Absent] | [DETAILS] | +| Debug Detection | [Present/Absent] | [DETAILS] | +| Code Obfuscation | [Yes/No] | [DETAILS] | + +## Memory Analysis + +| Search Pattern | Found | Risk | Details | +|---------------|-------|------|---------| +| Passwords | [YES/NO] | [RISK] | [DETAILS] | +| Auth Tokens | [YES/NO] | [RISK] | [DETAILS] | +| API Keys | [YES/NO] | [RISK] | [DETAILS] | +| JWTs | [YES/NO] | [RISK] | [DETAILS] | + +## Recommendations + +### Critical +1. [RECOMMENDATION] + +### High +1. [RECOMMENDATION] + +### Medium +1. [RECOMMENDATION] diff --git a/personas/_shared/skills/analyzing-ios-app-security-with-objection/references/api-reference.md b/personas/_shared/skills/analyzing-ios-app-security-with-objection/references/api-reference.md new file mode 100644 index 0000000..d77e4fb --- /dev/null +++ b/personas/_shared/skills/analyzing-ios-app-security-with-objection/references/api-reference.md @@ -0,0 +1,105 @@ +# API Reference: iOS App Security with Objection + +## Objection CLI + +### Launch +```bash +objection -g com.example.app explore # Attach to running app +objection -g com.example.app explore -s "command" # Run startup command +objection patchipa --source app.ipa # Patch IPA with Frida gadget +``` + +### Keychain & Data Storage +```bash +ios keychain dump # Dump keychain items +ios keychain dump --json # JSON output +ios cookies get # List HTTP cookies +ios nsuserdefaults get # Read NSUserDefaults +ios plist cat Info.plist # Read plist file +``` + +### SSL Pinning +```bash +ios sslpinning disable 
# Bypass SSL pinning +ios sslpinning disable --quiet # Quiet mode +``` + +### Jailbreak Detection +```bash +ios jailbreak disable # Bypass jailbreak detection +ios jailbreak simulate # Simulate jailbroken device +``` + +### Hooking +```bash +ios hooking list classes # List all classes +ios hooking list classes --include Auth # Filter classes +ios hooking list class_methods ClassName # List methods +ios hooking watch method "-[Class method]" # Watch method calls +ios hooking set return_value "-[Class isJB]" false # Override return +``` + +### Filesystem +```bash +ls / # List app sandbox root +ls /Documents # List Documents directory +file download /path/to/file local.out # Download file +file upload local.file /remote/path # Upload file +``` + +### Memory +```bash +memory dump all dump.bin # Dump all memory +memory search "password" # Search memory for string +memory list modules # List loaded modules +memory list exports libModule.dylib # List module exports +``` + +## Frida CLI + +### Syntax +```bash +frida -U -n AppName # Attach by name +frida -U -f com.app.id # Spawn and attach +frida -U -n AppName -l script.js # Load script +frida-ps -U # List running processes +frida-ls-devices # List connected devices +``` + +### Common Frida Scripts +```javascript +// Hook method and log arguments +ObjC.choose(ObjC.classes.ClassName, { + onMatch: function(instance) { + Interceptor.attach(instance['- methodName:'].implementation, { + onEnter: function(args) { + console.log('arg1:', ObjC.Object(args[2])); + } + }); + }, onComplete: function() {} +}); +``` + +## OWASP Mobile Top 10 (2024) + +| ID | Category | Objection Check | +|----|----------|-----------------| +| M1 | Improper Credential Usage | `ios keychain dump` | +| M2 | Inadequate Supply Chain Security | Binary analysis | +| M3 | Insecure Authentication | Hook auth classes | +| M4 | Insufficient Input/Output Validation | Hook input methods | +| M5 | Insecure Communication | `ios sslpinning disable` | +| M6 | Inadequate 
Privacy Controls | `ios nsuserdefaults get` | +| M7 | Insufficient Binary Protections | Check PIE, ARC, stack canary | +| M8 | Security Misconfiguration | `ios plist cat Info.plist` | +| M9 | Insecure Data Storage | Filesystem + keychain review | +| M10 | Insufficient Cryptography | Hook crypto classes | + +## iOS App Sandbox Paths +| Path | Contents | +|------|----------| +| `/Documents` | User-generated data | +| `/Library/Caches` | Cached data | +| `/Library/Preferences` | Plist settings | +| `/tmp` | Temporary files | +| `/Library/Cookies` | Cookie storage | diff --git a/personas/_shared/skills/analyzing-ios-app-security-with-objection/references/standards.md b/personas/_shared/skills/analyzing-ios-app-security-with-objection/references/standards.md new file mode 100644 index 0000000..f8ee5a4 --- /dev/null +++ b/personas/_shared/skills/analyzing-ios-app-security-with-objection/references/standards.md @@ -0,0 +1,43 @@ +# Standards Reference: iOS App Security with Objection + +## OWASP Mobile Top 10 2024 Mapping + +| OWASP ID | Risk | Objection Testing Coverage | +|----------|------|---------------------------| +| M1 | Improper Credential Usage | Keychain dumping, memory string search for hardcoded credentials | +| M3 | Insecure Authentication/Authorization | Hook authentication methods, bypass biometric checks | +| M5 | Insecure Communication | SSL pinning bypass, network class hooking | +| M7 | Insufficient Binary Protections | Jailbreak detection bypass, Frida detection assessment | +| M8 | Security Misconfiguration | Info.plist review, URL scheme analysis, ATS configuration | +| M9 | Insecure Data Storage | NSUserDefaults inspection, SQLite database access, file system review | + +## OWASP MASVS v2.0 Control Mapping + +| MASVS Category | Objection Commands | Assessment Area | +|----------------|-------------------|-----------------| +| MASVS-STORAGE | `ios keychain dump`, `ios nsuserdefaults get`, `sqlite connect` | Sensitive data in keychain, NSUserDefaults, 
databases | +| MASVS-CRYPTO | `memory search`, hook crypto framework calls | Key storage, algorithm selection | +| MASVS-AUTH | Hook LAContext, authentication classes | Biometric bypass, session management | +| MASVS-NETWORK | `ios sslpinning disable`, hook NSURLSession | Certificate pinning, cleartext traffic | +| MASVS-PLATFORM | Hook URL scheme handlers, pasteboard monitor | Deep link security, clipboard exposure | +| MASVS-CODE | `memory list modules`, binary inspection | Debugging symbols, framework analysis | +| MASVS-RESILIENCE | `ios jailbreak disable`, Frida detection hooks | Anti-tampering, anti-debugging | + +## OWASP MASTG Test Cases + +| Test ID | Description | Objection Approach | +|---------|-------------|-------------------| +| MASTG-TEST-0053 | Testing Local Storage for Sensitive Data | `ios keychain dump`, filesystem inspection | +| MASTG-TEST-0057 | Testing Backups for Sensitive Data | Check backup exclusion attributes | +| MASTG-TEST-0060 | Testing Custom URL Schemes | Hook `application:openURL:options:` | +| MASTG-TEST-0063 | Testing for Sensitive Data in Logs | Monitor NSLog calls via hooking | +| MASTG-TEST-0066 | Testing Enforced App Transport Security | Inspect Info.plist ATS configuration | + +## Apple Platform Security Requirements + +| Requirement | Assessment Method | +|-------------|-------------------| +| Keychain Access Control | Verify kSecAttrAccessible values via keychain dump | +| App Transport Security | Check Info.plist for NSAllowsArbitraryLoads exceptions | +| Data Protection API | Verify file protection attributes on sensitive files | +| Secure Enclave Usage | Hook SecKey operations for biometric-protected keys | diff --git a/personas/_shared/skills/analyzing-ios-app-security-with-objection/references/workflows.md b/personas/_shared/skills/analyzing-ios-app-security-with-objection/references/workflows.md new file mode 100644 index 0000000..aeae0a4 --- /dev/null +++ 
b/personas/_shared/skills/analyzing-ios-app-security-with-objection/references/workflows.md @@ -0,0 +1,83 @@ +# Workflows: iOS App Security with Objection + +## Workflow 1: iOS Runtime Security Assessment + +``` +[Setup Environment] --> [Prepare Device] --> [Attach Objection] --> [Runtime Analysis] + | | | | + v v v v +[Install Frida] [Jailbroken: Start [Connect via USB] [Data Storage Check] +[Install Objection] frida-server] [Spawn target app] [Network Security] + [Non-JB: Patch IPA] [Auth Mechanism Review] + [Binary Protection Test] + | + v + [Document Findings] + [Generate Report] +``` + +## Workflow 2: SSL Pinning Bypass for Traffic Interception + +``` +[Configure Burp Proxy] --> [Set device proxy] --> [Attach Objection] + | + v + [ios sslpinning disable] + | + v + [Navigate app in browser/UI] + | + v + [Capture HTTPS traffic in Burp] + [Analyze API endpoints] + [Test authentication flows] + [Check for sensitive data in transit] +``` + +## Workflow 3: Keychain and Data Storage Assessment + +``` +[Attach Objection] --> [ios keychain dump] --> [Analyze keychain items] + | | + v v + [ios nsuserdefaults get] [Check protection classes] + | [Identify sensitive tokens] + v [Verify encryption at rest] + [List app sandbox files] + | + v + [sqlite connect *.db] + [Query sensitive tables] + | + v + [memory search "password"] + [memory search "token"] + [memory search "secret"] +``` + +## Workflow 4: Jailbreak Detection Assessment + +``` +[Attach Objection] --> [ios jailbreak disable] --> [Navigate app] + | | + v [App functions normally?] 
+ [Hook detection methods] / \ + [Monitor file checks] [Yes] [No] + [Monitor Cydia URL scheme] | | + | [Detection [Additional detection + v bypassed] methods exist] + [Document detection | + methods found] [Hook deeper: search + [Assess bypass for custom checks] + difficulty] [Frida script for + targeted bypass] +``` + +## Decision Matrix: Testing Approach + +| Device State | IPA Access | Approach | +|-------------|-----------|----------| +| Jailbroken | Not needed | Direct Frida server + Objection attach | +| Non-jailbroken | Available | Patch IPA with `objection patchipa` | +| Non-jailbroken | Not available | Request IPA from client or use device management | +| Emulator | N/A | Limited: Frida on Corellium or similar platform | diff --git a/personas/_shared/skills/analyzing-ios-app-security-with-objection/scripts/agent.py b/personas/_shared/skills/analyzing-ios-app-security-with-objection/scripts/agent.py new file mode 100644 index 0000000..e6ef52d --- /dev/null +++ b/personas/_shared/skills/analyzing-ios-app-security-with-objection/scripts/agent.py @@ -0,0 +1,211 @@ +#!/usr/bin/env python3 +"""iOS app security analysis agent using Objection/Frida concepts. + +Performs runtime security assessment of iOS apps including SSL pinning bypass, +keychain dumping, filesystem inspection, and jailbreak detection bypass. 
import subprocess
import json
import sys


def run_objection(command, app_id=None, timeout=30):
    """Run a single Objection console command against a target app.

    Args:
        command: Objection command string, e.g. "ios keychain dump".
        app_id: Optional bundle identifier; passed via ``-g`` so Objection
            attaches to that app's Frida gadget.
        timeout: Seconds before the subprocess is aborted.

    Returns:
        Tuple of (output text, return code). Tool-missing and timeout
        conditions are reported as (message, 1) instead of raising, so
        callers can handle every failure mode uniformly.
    """
    cmd = ["objection"]
    if app_id:
        cmd.extend(["-g", app_id])
    cmd.extend(["explore", "-c", command])
    try:
        result = subprocess.run(cmd, capture_output=True, text=True, timeout=timeout)
        return result.stdout, result.returncode
    except FileNotFoundError:
        return "objection not installed (pip install objection)", 1
    except subprocess.TimeoutExpired:
        return "Command timed out", 1


def run_frida(script_code, app_id, timeout=30):
    """Evaluate a Frida JavaScript snippet inside the target app process.

    Args:
        script_code: JavaScript source passed verbatim to ``frida -e``.
        app_id: Process name to attach to (``-n``) over USB (``-U``).
        timeout: Seconds before the subprocess is aborted.

    Returns:
        Tuple of (stdout text, return code), same error convention as
        run_objection().
    """
    cmd = ["frida", "-U", "-n", app_id, "-e", script_code]
    try:
        result = subprocess.run(cmd, capture_output=True, text=True, timeout=timeout)
        return result.stdout, result.returncode
    except FileNotFoundError:
        return "frida not installed (pip install frida-tools)", 1
    except subprocess.TimeoutExpired:
        return "Command timed out", 1


def dump_keychain(app_id):
    """Dump keychain items accessible by the application."""
    return run_objection("ios keychain dump", app_id)


def dump_cookies(app_id):
    """Dump HTTP cookies stored by the application."""
    return run_objection("ios cookies get", app_id)


def list_classes(app_id, filter_str=None):
    """List Objective-C classes loaded in the app, optionally filtered.

    Args:
        app_id: Target bundle identifier.
        filter_str: Substring filter forwarded as ``--include``.
    """
    cmd = "ios hooking list classes"
    if filter_str:
        cmd += f" --include {filter_str}"
    return run_objection(cmd, app_id)


def check_ssl_pinning(app_id):
    """Attempt to disable SSL certificate pinning at runtime."""
    return run_objection("ios sslpinning disable", app_id)


def check_jailbreak_detection(app_id):
    """Attempt to bypass jailbreak detection at runtime."""
    return run_objection("ios jailbreak disable", app_id)


def inspect_filesystem(app_id, path="/"):
    """List a directory inside the application's sandbox."""
    return run_objection(f"ls {path}", app_id)


def dump_plist(app_id):
    """Dump the application's Info.plist configuration."""
    return run_objection("ios plist cat Info.plist", app_id)


def check_pasteboard(app_id):
    """Monitor the pasteboard/clipboard for sensitive data."""
    return run_objection("ios pasteboard monitor", app_id)


def search_binary_strings(app_id, pattern):
    """Search process memory for a string pattern."""
    return run_objection(f"memory search '{pattern}'", app_id)


# OWASP Mobile Top 10 categories mapped to the Objection commands that
# exercise each one. Keys appear verbatim in generated reports, so they
# are treated as a stable external contract. NOTE(review): numbering
# follows the 2016 list, while the skill's reference docs use the 2024
# list — confirm which edition reports should cite.
OWASP_MOBILE_CHECKS = {
    "M1_Improper_Platform_Usage": {
        "checks": ["ios keychain dump", "ios plist cat Info.plist"],
        "description": "Check for misuse of platform security features",
    },
    "M2_Insecure_Data_Storage": {
        "checks": ["ios keychain dump", "ios cookies get", "ios nsuserdefaults get"],
        "description": "Check for sensitive data in insecure storage",
    },
    "M3_Insecure_Communication": {
        "checks": ["ios sslpinning disable"],
        "description": "Test SSL/TLS implementation and certificate pinning",
    },
    "M4_Insecure_Authentication": {
        "checks": [
            "ios hooking list classes --include Auth",
            "ios hooking list classes --include Login",
        ],
        "description": "Analyze authentication mechanisms",
    },
    "M5_Insufficient_Cryptography": {
        "checks": [
            "ios hooking list classes --include Crypto",
            "ios hooking list classes --include AES",
        ],
        "description": "Review cryptographic implementations",
    },
    "M8_Code_Tampering": {
        "checks": ["ios jailbreak disable"],
        "description": "Test runtime integrity and jailbreak detection",
    },
    "M9_Reverse_Engineering": {
        "checks": ["ios hooking list classes"],
        "description": "Assess reverse engineering protections",
    },
}


def run_owasp_assessment(app_id):
    """Run every OWASP category's checks and collect per-command results.

    Args:
        app_id: Target bundle identifier.

    Returns:
        Dict keyed by OWASP category; each value holds the category
        description and a findings list of {command, status, output_preview}.
    """
    results = {}
    for category, config in OWASP_MOBILE_CHECKS.items():
        category_results = {"description": config["description"], "findings": []}
        for check in config["checks"]:
            output, rc = run_objection(check, app_id)
            category_results["findings"].append({
                "command": check,
                # "success" only means the command executed; it is not a
                # verdict on the app's security posture.
                "status": "success" if rc == 0 else "failed",
                "output_preview": output[:200] if output else "",
            })
        results[category] = category_results
    return results


# Reusable Frida snippets for common runtime bypasses; values are passed
# verbatim to `frida -e` by run_frida().
FRIDA_SCRIPTS = {
    "ssl_pinning_bypass": """
ObjC.choose(ObjC.classes.NSURLSessionConfiguration, {
    onMatch: function(instance) {
        instance['- setTLSMinimumSupportedProtocol:'](0);
    }, onComplete: function() {}
});
""",
    "jailbreak_bypass": """
var paths = ['/Applications/Cydia.app', '/usr/sbin/sshd', '/etc/apt'];
Interceptor.attach(ObjC.classes.NSFileManager['- fileExistsAtPath:'].implementation, {
    onEnter: function(args) { this.path = ObjC.Object(args[2]).toString(); },
    onLeave: function(retval) {
        if (paths.some(p => this.path.includes(p))) retval.replace(0);
    }
});
""",
    "keychain_dump": """
var kSecClass = ObjC.classes.__NSDictionary.dictionaryWithObject_forKey_(
    ObjC.classes.__NSCFConstantString.alloc().initWithUTF8String_('genp'),
    ObjC.classes.__NSCFConstantString.alloc().initWithUTF8String_('class')
);
console.log('Keychain query prepared');
""",
}


def generate_report(app_id, assessment_results):
    """Summarize run_owasp_assessment() output into a report dict.

    Args:
        app_id: Target bundle identifier echoed into the report.
        assessment_results: Mapping as returned by run_owasp_assessment().

    Returns:
        Report dict with framework name, category/check totals, and the
        raw per-category results.
    """
    findings_count = sum(
        len(cat["findings"]) for cat in assessment_results.values()
    )
    return {
        "app_identifier": app_id,
        "assessment_framework": "OWASP Mobile Top 10",
        "categories_tested": len(assessment_results),
        "total_checks": findings_count,
        "results": assessment_results,
    }


if __name__ == "__main__":
    print("=" * 60)
    print("iOS App Security Analysis Agent (Objection/Frida)")
    print("Runtime analysis, SSL bypass, keychain dump, OWASP checks")
    print("=" * 60)

    app_id = sys.argv[1] if len(sys.argv) > 1 else None

    if not app_id:
        # FIX: the argument placeholder was missing, so the demo printed
        # "Usage: python agent.py " with no indication an argument is needed.
        print("\n[DEMO] Usage: python agent.py <bundle_id>")
        print("       e.g. python agent.py com.example.app")
        print("\nAvailable checks:")
        for category, config in OWASP_MOBILE_CHECKS.items():
            print(f"  {category}: {config['description']}")
        print("\nFrida scripts available:")
        for name in FRIDA_SCRIPTS:
            print(f"  {name}")
        sys.exit(0)

    print(f"\n[*] Target: {app_id}")
    print("[*] Running OWASP Mobile Top 10 assessment...")

    results = run_owasp_assessment(app_id)
    report = generate_report(app_id, results)

    for category, data in results.items():
        status_counts = {"success": 0, "failed": 0}
        for f in data["findings"]:
            status_counts[f["status"]] += 1
        print(f"\n  [{category}] {data['description']}")
        print(f"    Checks: {status_counts['success']} passed, {status_counts['failed']} failed")

    print(f"\n{json.dumps(report, indent=2, default=str)}")
+ +Usage: + python process.py --bundle-id com.target.app [--device-id UDID] [--output report.json] +""" + +import argparse +import json +import subprocess +import sys +import re +from datetime import datetime +from pathlib import Path + + +class ObjectionAssessor: + """Automates Objection-based iOS security assessment tasks.""" + + def __init__(self, bundle_id: str, device_id: str = None): + self.bundle_id = bundle_id + self.device_id = device_id + self.findings = [] + + def _run_objection_command(self, command: str, timeout: int = 30) -> str: + """Execute an Objection command and return output.""" + cmd = ["objection", "--gadget", self.bundle_id, "run", command] + if self.device_id: + cmd.insert(1, "--serial") + cmd.insert(2, self.device_id) + + try: + result = subprocess.run( + cmd, + capture_output=True, + text=True, + timeout=timeout, + ) + return result.stdout + result.stderr + except subprocess.TimeoutExpired: + return f"TIMEOUT: Command '{command}' exceeded {timeout}s" + except FileNotFoundError: + return "ERROR: Objection not found. 
Install with: pip install objection" + + def _run_frida_command(self, script: str, timeout: int = 15) -> str: + """Execute a Frida script snippet.""" + cmd = ["frida", "-U", "-n", self.bundle_id, "-e", script] + if self.device_id: + cmd.extend(["-D", self.device_id]) + + try: + result = subprocess.run( + cmd, + capture_output=True, + text=True, + timeout=timeout, + ) + return result.stdout + except (subprocess.TimeoutExpired, FileNotFoundError): + return "" + + def check_frida_connectivity(self) -> dict: + """Verify Frida can connect to the device.""" + cmd = ["frida-ps", "-U"] + if self.device_id: + cmd.extend(["-D", self.device_id]) + + try: + result = subprocess.run(cmd, capture_output=True, text=True, timeout=10) + connected = result.returncode == 0 + processes = len(result.stdout.strip().split("\n")) - 1 if connected else 0 + return { + "connected": connected, + "process_count": processes, + "target_running": self.bundle_id in result.stdout, + } + except (subprocess.TimeoutExpired, FileNotFoundError): + return {"connected": False, "process_count": 0, "target_running": False} + + def dump_keychain(self) -> dict: + """Dump keychain items accessible to the app.""" + output = self._run_objection_command("ios keychain dump") + items = [] + current_item = {} + + for line in output.split("\n"): + line = line.strip() + if "Service" in line and ":" in line: + if current_item: + items.append(current_item) + current_item = {"service": line.split(":", 1)[-1].strip()} + elif "Account" in line and ":" in line: + current_item["account"] = line.split(":", 1)[-1].strip() + elif "Data" in line and ":" in line: + data = line.split(":", 1)[-1].strip() + current_item["data_preview"] = data[:50] + "..." 
if len(data) > 50 else data + current_item["data_length"] = len(data) + + if current_item: + items.append(current_item) + + finding = { + "check": "keychain_dump", + "category": "MASVS-STORAGE", + "owasp_mobile": "M9", + "items_found": len(items), + "items": items[:20], + "severity": "HIGH" if items else "INFO", + "description": f"Found {len(items)} keychain items accessible to the application", + } + self.findings.append(finding) + return finding + + def check_nsuserdefaults(self) -> dict: + """Inspect NSUserDefaults for sensitive data.""" + output = self._run_objection_command("ios nsuserdefaults get") + sensitive_patterns = [ + "password", "token", "secret", "key", "auth", + "session", "credential", "api_key", "apikey", + ] + + sensitive_entries = [] + for line in output.split("\n"): + line_lower = line.lower() + for pattern in sensitive_patterns: + if pattern in line_lower: + sensitive_entries.append(line.strip()) + break + + finding = { + "check": "nsuserdefaults", + "category": "MASVS-STORAGE", + "owasp_mobile": "M9", + "sensitive_entries": len(sensitive_entries), + "entries": sensitive_entries[:10], + "severity": "HIGH" if sensitive_entries else "PASS", + "description": f"Found {len(sensitive_entries)} potentially sensitive NSUserDefaults entries", + } + self.findings.append(finding) + return finding + + def check_ssl_pinning(self) -> dict: + """Assess SSL pinning implementation.""" + output = self._run_objection_command("ios sslpinning disable") + pinning_detected = "pinning" in output.lower() or "hook" in output.lower() + + finding = { + "check": "ssl_pinning", + "category": "MASVS-NETWORK", + "owasp_mobile": "M5", + "pinning_detected": pinning_detected, + "bypass_output": output[:500], + "severity": "MEDIUM" if not pinning_detected else "INFO", + "description": "SSL pinning " + ("detected and bypassed" if pinning_detected else "not detected"), + } + self.findings.append(finding) + return finding + + def check_jailbreak_detection(self) -> dict: + """Assess 
jailbreak detection implementation.""" + output = self._run_objection_command("ios jailbreak disable") + detection_found = "hook" in output.lower() or "bypass" in output.lower() + + finding = { + "check": "jailbreak_detection", + "category": "MASVS-RESILIENCE", + "owasp_mobile": "M7", + "detection_implemented": detection_found, + "bypass_output": output[:500], + "severity": "MEDIUM" if not detection_found else "INFO", + "description": "Jailbreak detection " + ("found" if detection_found else "not found or not implemented"), + } + self.findings.append(finding) + return finding + + def search_sensitive_memory(self) -> dict: + """Search app memory for sensitive strings.""" + patterns = ["password", "Bearer ", "eyJ", "api_key", "secret"] + memory_findings = [] + + for pattern in patterns: + output = self._run_objection_command(f'memory search "{pattern}" --string') + matches = output.count("Found") + if matches > 0: + memory_findings.append({ + "pattern": pattern, + "matches": matches, + }) + + finding = { + "check": "memory_search", + "category": "MASVS-STORAGE", + "owasp_mobile": "M9", + "patterns_with_matches": len(memory_findings), + "details": memory_findings, + "severity": "HIGH" if memory_findings else "PASS", + "description": f"Found sensitive patterns in memory for {len(memory_findings)} search terms", + } + self.findings.append(finding) + return finding + + def get_app_info(self) -> dict: + """Gather basic app information.""" + output = self._run_objection_command("ios info binary") + env_output = self._run_objection_command("env") + + return { + "bundle_id": self.bundle_id, + "binary_info": output[:1000], + "environment": env_output[:1000], + } + + def generate_report(self) -> dict: + """Generate consolidated assessment report.""" + severity_counts = {"HIGH": 0, "MEDIUM": 0, "LOW": 0, "INFO": 0, "PASS": 0} + for f in self.findings: + sev = f.get("severity", "INFO") + severity_counts[sev] = severity_counts.get(sev, 0) + 1 + + return { + "assessment": { + 
"target": self.bundle_id, + "date": datetime.now().isoformat(), + "tool": "Objection (Frida-powered)", + "type": "iOS Runtime Security Assessment", + }, + "summary": { + "total_checks": len(self.findings), + "severity_breakdown": severity_counts, + "critical_findings": [ + f for f in self.findings if f.get("severity") in ("HIGH", "CRITICAL") + ], + }, + "findings": self.findings, + } + + +def main(): + parser = argparse.ArgumentParser( + description="Objection iOS Security Assessment Automation" + ) + parser.add_argument("--bundle-id", required=True, help="iOS app bundle identifier") + parser.add_argument("--device-id", help="Device UDID for targeting specific device") + parser.add_argument("--output", default="objection_report.json", help="Output report path") + parser.add_argument("--checks", nargs="+", + default=["keychain", "nsuserdefaults", "ssl", "jailbreak", "memory"], + help="Checks to run") + args = parser.parse_args() + + assessor = ObjectionAssessor(args.bundle_id, args.device_id) + + # Verify connectivity + connectivity = assessor.check_frida_connectivity() + if not connectivity["connected"]: + print("[-] ERROR: Cannot connect to device via Frida") + print(" Ensure Frida server is running on device or IPA is patched") + sys.exit(1) + + print(f"[+] Connected to device. 
Target running: {connectivity['target_running']}") + + # Run selected checks + check_map = { + "keychain": assessor.dump_keychain, + "nsuserdefaults": assessor.check_nsuserdefaults, + "ssl": assessor.check_ssl_pinning, + "jailbreak": assessor.check_jailbreak_detection, + "memory": assessor.search_sensitive_memory, + } + + for check in args.checks: + if check in check_map: + print(f"[*] Running check: {check}") + result = check_map[check]() + print(f" Severity: {result['severity']} - {result['description']}") + + # Generate report + report = assessor.generate_report() + + with open(args.output, "w") as f: + json.dump(report, f, indent=2) + print(f"\n[+] Report saved: {args.output}") + + # Summary + high_count = report["summary"]["severity_breakdown"].get("HIGH", 0) + if high_count > 0: + print(f"[!] {high_count} HIGH severity findings require attention") + + +if __name__ == "__main__": + main() diff --git a/personas/_shared/skills/analyzing-kubernetes-audit-logs/LICENSE b/personas/_shared/skills/analyzing-kubernetes-audit-logs/LICENSE new file mode 100644 index 0000000..d885118 --- /dev/null +++ b/personas/_shared/skills/analyzing-kubernetes-audit-logs/LICENSE @@ -0,0 +1,201 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to the Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by the Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding any notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. Please do not remove or change + the license header comment from a contributed file except when + necessary. 
+ + Copyright 2026 mukul975 + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/personas/_shared/skills/analyzing-kubernetes-audit-logs/SKILL.md b/personas/_shared/skills/analyzing-kubernetes-audit-logs/SKILL.md new file mode 100644 index 0000000..c8b26fa --- /dev/null +++ b/personas/_shared/skills/analyzing-kubernetes-audit-logs/SKILL.md @@ -0,0 +1,73 @@ +--- +name: analyzing-kubernetes-audit-logs +description: 'Parses Kubernetes API server audit logs (JSON lines) to detect exec-into-pod, secret access, RBAC modifications, + privileged pod creation, and anonymous API access. Builds threat detection rules from audit event patterns. Use when investigating + Kubernetes cluster compromise or building k8s-specific SIEM detection rules. 
+ + ' +domain: cybersecurity +subdomain: container-security +tags: +- analyzing +- kubernetes +- audit +- logs +version: '1.0' +author: mahipal +license: Apache-2.0 +nist_csf: +- PR.PS-01 +- PR.IR-01 +- ID.AM-08 +- DE.CM-01 +--- + +# Analyzing Kubernetes Audit Logs + + +## When to Use + +- When investigating security incidents that require analyzing kubernetes audit logs +- When building detection rules or threat hunting queries for this domain +- When SOC analysts need structured procedures for this analysis type +- When validating security monitoring coverage for related attack techniques + +## Prerequisites + +- Familiarity with container security concepts and tools +- Access to a test or lab environment for safe execution +- Python 3.8+ with required dependencies installed +- Appropriate authorization for any testing activities + +## Instructions + +Parse Kubernetes audit log files (JSON lines format) to detect security-relevant +events including unauthorized access, privilege escalation, and data exfiltration. + +```python +import json + +with open("/var/log/kubernetes/audit.log") as f: + for line in f: + event = json.loads(line) + verb = event.get("verb") + resource = event.get("objectRef", {}).get("resource") + user = event.get("user", {}).get("username") + if verb == "create" and resource == "pods/exec": + print(f"Pod exec by {user}") +``` + +Key events to detect: +1. pods/exec and pods/attach (shell into containers) +2. secrets access (get/list/watch) +3. clusterrolebindings creation (RBAC escalation) +4. Privileged pod creation +5. 
Anonymous or system:unauthenticated access + +## Examples + +```python +# Detect secret enumeration +if verb in ("get", "list") and resource == "secrets": + print(f"Secret access: {user} -> {event['objectRef'].get('name')}") +``` diff --git a/personas/_shared/skills/analyzing-kubernetes-audit-logs/references/api-reference.md b/personas/_shared/skills/analyzing-kubernetes-audit-logs/references/api-reference.md new file mode 100644 index 0000000..334701b --- /dev/null +++ b/personas/_shared/skills/analyzing-kubernetes-audit-logs/references/api-reference.md @@ -0,0 +1,57 @@ +# API Reference: Analyzing Kubernetes Audit Logs + +## Audit Log Format (JSON Lines) + +```json +{ + "kind": "Event", + "apiVersion": "audit.k8s.io/v1", + "level": "RequestResponse", + "verb": "create", + "user": {"username": "admin", "groups": ["system:masters"]}, + "sourceIPs": ["10.0.0.5"], + "objectRef": { + "resource": "pods", + "subresource": "exec", + "namespace": "default", + "name": "web-pod" + }, + "responseStatus": {"code": 200}, + "requestReceivedTimestamp": "2025-03-15T14:00:00Z" +} +``` + +## Security-Critical Audit Events + +| Event | objectRef | Severity | +|-------|-----------|----------| +| Pod exec | `resource: pods, subresource: exec` | HIGH | +| Secret access | `resource: secrets, verb: get/list` | HIGH | +| RBAC change | `resource: clusterrolebindings` | CRITICAL | +| Privileged pod | `requestObject.spec.containers[].securityContext.privileged` | CRITICAL | +| Anonymous access | `user.username: system:anonymous` | CRITICAL | + +## Audit Policy Levels + +| Level | Captures | +|-------|----------| +| None | No logging | +| Metadata | Timestamp, user, verb, resource | +| Request | Metadata + request body | +| RequestResponse | Request + response body | + +## Python Parsing + +```python +import json +with open("audit.log") as f: + for line in f: + event = json.loads(line) + print(event["verb"], event["objectRef"]["resource"]) +``` + +### References + +- K8s Auditing: 
https://kubernetes.io/docs/tasks/debug/debug-cluster/audit/ +- Audit policy: https://kubernetes.io/docs/reference/config-api/apiserver-audit.v1/ +- Datadog k8s audit: https://www.datadoghq.com/blog/monitor-kubernetes-audit-logs/ diff --git a/personas/_shared/skills/analyzing-kubernetes-audit-logs/scripts/agent.py b/personas/_shared/skills/analyzing-kubernetes-audit-logs/scripts/agent.py new file mode 100644 index 0000000..96d3f97 --- /dev/null +++ b/personas/_shared/skills/analyzing-kubernetes-audit-logs/scripts/agent.py @@ -0,0 +1,201 @@ +#!/usr/bin/env python3 +"""Agent for analyzing Kubernetes audit logs for security threats.""" + +import json +import argparse +from collections import defaultdict +from datetime import datetime + + +def parse_audit_log(log_path): + """Parse Kubernetes audit log file (JSON lines format).""" + events = [] + with open(log_path) as f: + for line in f: + line = line.strip() + if not line: + continue + try: + events.append(json.loads(line)) + except json.JSONDecodeError: + continue + return events + + +def detect_pod_exec(events): + """Detect kubectl exec and attach events (shell access to pods).""" + findings = [] + for event in events: + obj_ref = event.get("objectRef", {}) + subresource = obj_ref.get("subresource", "") + if subresource in ("exec", "attach"): + findings.append({ + "timestamp": event.get("requestReceivedTimestamp", ""), + "user": event.get("user", {}).get("username", ""), + "groups": event.get("user", {}).get("groups", []), + "verb": event.get("verb", ""), + "namespace": obj_ref.get("namespace", ""), + "pod": obj_ref.get("name", ""), + "subresource": subresource, + "source_ip": event.get("sourceIPs", [""])[0], + "severity": "HIGH", + }) + return findings + + +def detect_secret_access(events): + """Detect access to Kubernetes secrets.""" + findings = [] + for event in events: + obj_ref = event.get("objectRef", {}) + if obj_ref.get("resource") != "secrets": + continue + verb = event.get("verb", "") + if verb not in 
("get", "list", "watch", "create", "update", "delete"): + continue + findings.append({ + "timestamp": event.get("requestReceivedTimestamp", ""), + "user": event.get("user", {}).get("username", ""), + "verb": verb, + "namespace": obj_ref.get("namespace", ""), + "secret_name": obj_ref.get("name", ""), + "source_ip": event.get("sourceIPs", [""])[0], + "severity": "HIGH" if verb in ("list", "delete") else "MEDIUM", + }) + return findings + + +def detect_rbac_changes(events): + """Detect RBAC role and binding modifications.""" + rbac_resources = {"clusterroles", "clusterrolebindings", "roles", "rolebindings"} + findings = [] + for event in events: + obj_ref = event.get("objectRef", {}) + resource = obj_ref.get("resource", "") + verb = event.get("verb", "") + if resource in rbac_resources and verb in ("create", "update", "patch", "delete"): + findings.append({ + "timestamp": event.get("requestReceivedTimestamp", ""), + "user": event.get("user", {}).get("username", ""), + "verb": verb, + "resource": resource, + "name": obj_ref.get("name", ""), + "namespace": obj_ref.get("namespace", ""), + "source_ip": event.get("sourceIPs", [""])[0], + "severity": "CRITICAL" if "cluster" in resource else "HIGH", + }) + return findings + + +def detect_privileged_pods(events): + """Detect creation of privileged pods.""" + findings = [] + for event in events: + if event.get("verb") != "create": + continue + obj_ref = event.get("objectRef", {}) + if obj_ref.get("resource") != "pods": + continue + request_obj = event.get("requestObject", {}) + spec = request_obj.get("spec", {}) + containers = spec.get("containers", []) + for container in containers: + sc = container.get("securityContext", {}) + if sc.get("privileged"): + findings.append({ + "timestamp": event.get("requestReceivedTimestamp", ""), + "user": event.get("user", {}).get("username", ""), + "namespace": obj_ref.get("namespace", ""), + "pod": obj_ref.get("name", ""), + "container": container.get("name", ""), + "severity": "CRITICAL", 
+ }) + return findings + + +def detect_anonymous_access(events): + """Detect API access by anonymous or unauthenticated users.""" + findings = [] + anon_users = {"system:anonymous", "system:unauthenticated"} + for event in events: + user = event.get("user", {}).get("username", "") + groups = event.get("user", {}).get("groups", []) + if user in anon_users or "system:unauthenticated" in groups: + status_code = event.get("responseStatus", {}).get("code", 0) + if status_code < 400: + findings.append({ + "timestamp": event.get("requestReceivedTimestamp", ""), + "user": user, + "verb": event.get("verb", ""), + "resource": event.get("objectRef", {}).get("resource", ""), + "source_ip": event.get("sourceIPs", [""])[0], + "status_code": status_code, + "severity": "CRITICAL", + }) + return findings + + +def detect_forbidden_surge(events, threshold=20): + """Detect 403 surges indicating enumeration or brute force.""" + user_forbidden = defaultdict(int) + for event in events: + if event.get("responseStatus", {}).get("code") == 403: + user = event.get("user", {}).get("username", "") + user_forbidden[user] += 1 + surges = [] + for user, count in user_forbidden.items(): + if count >= threshold: + surges.append({"user": user, "forbidden_count": count, "severity": "MEDIUM"}) + return sorted(surges, key=lambda x: x["forbidden_count"], reverse=True) + + +def main(): + parser = argparse.ArgumentParser(description="Kubernetes Audit Log Analyzer") + parser.add_argument("--audit-log", required=True, help="Path to audit log file") + parser.add_argument("--output", default="k8s_audit_report.json") + parser.add_argument("--action", choices=[ + "exec", "secrets", "rbac", "privileged", "anonymous", "full_analysis" + ], default="full_analysis") + args = parser.parse_args() + + events = parse_audit_log(args.audit_log) + report = {"log_file": args.audit_log, "total_events": len(events), + "generated_at": datetime.utcnow().isoformat(), "findings": {}} + print(f"[+] Parsed {len(events)} audit 
events") + + if args.action in ("exec", "full_analysis"): + findings = detect_pod_exec(events) + report["findings"]["pod_exec"] = findings + print(f"[+] Pod exec/attach events: {len(findings)}") + + if args.action in ("secrets", "full_analysis"): + findings = detect_secret_access(events) + report["findings"]["secret_access"] = findings + print(f"[+] Secret access events: {len(findings)}") + + if args.action in ("rbac", "full_analysis"): + findings = detect_rbac_changes(events) + report["findings"]["rbac_changes"] = findings + print(f"[+] RBAC changes: {len(findings)}") + + if args.action in ("privileged", "full_analysis"): + findings = detect_privileged_pods(events) + report["findings"]["privileged_pods"] = findings + print(f"[+] Privileged pod creation: {len(findings)}") + + if args.action in ("anonymous", "full_analysis"): + findings = detect_anonymous_access(events) + report["findings"]["anonymous_access"] = findings + print(f"[+] Anonymous access events: {len(findings)}") + + forbidden = detect_forbidden_surge(events) + report["findings"]["forbidden_surges"] = forbidden + print(f"[+] 403 surges: {len(forbidden)}") + + with open(args.output, "w") as f: + json.dump(report, f, indent=2, default=str) + print(f"[+] Report saved to {args.output}") + + +if __name__ == "__main__": + main() diff --git a/personas/_shared/skills/analyzing-linux-audit-logs-for-intrusion/LICENSE b/personas/_shared/skills/analyzing-linux-audit-logs-for-intrusion/LICENSE new file mode 100644 index 0000000..d885118 --- /dev/null +++ b/personas/_shared/skills/analyzing-linux-audit-logs-for-intrusion/LICENSE @@ -0,0 +1,201 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to the Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by the Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding any notices that do not + pertain to any part of the Derivative Works, in at least one + of the 
following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. Please do not remove or change + the license header comment from a contributed file except when + necessary. + + Copyright 2026 mukul975 + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/personas/_shared/skills/analyzing-linux-audit-logs-for-intrusion/SKILL.md b/personas/_shared/skills/analyzing-linux-audit-logs-for-intrusion/SKILL.md new file mode 100644 index 0000000..6fb404c --- /dev/null +++ b/personas/_shared/skills/analyzing-linux-audit-logs-for-intrusion/SKILL.md @@ -0,0 +1,268 @@ +--- +name: analyzing-linux-audit-logs-for-intrusion +description: 'Uses the Linux Audit framework (auditd) with ausearch and aureport utilities to detect intrusion attempts, unauthorized + access, privilege escalation, and suspicious system activity. Covers audit rule configuration, log querying, timeline reconstruction, + and integration with SIEM platforms. Activates for requests involving auditd analysis, Linux audit log investigation, ausearch + queries, aureport summaries, or host-based intrusion detection on Linux. 
+ + ' +domain: cybersecurity +subdomain: incident-response +tags: +- auditd +- ausearch +- aureport +- linux-security +- intrusion-detection +- HIDS +- forensics +version: 1.0.0 +author: mahipal +license: Apache-2.0 +nist_csf: +- RS.MA-01 +- RS.MA-02 +- RS.AN-03 +- RC.RP-01 +--- + +# Analyzing Linux Audit Logs for Intrusion + +## When to Use + +- Investigating suspected unauthorized access or privilege escalation on Linux hosts +- Hunting for evidence of exploitation, backdoor installation, or persistence mechanisms +- Auditing compliance with security baselines (CIS, STIG, PCI-DSS) that require system call monitoring +- Reconstructing a timeline of attacker actions during incident response +- Detecting file tampering on critical system files such as `/etc/passwd`, `/etc/shadow`, or SSH keys + +**Do not use** for network-level intrusion detection; use Suricata or Zeek for network traffic analysis. Auditd operates at the kernel level on individual hosts. + +## Prerequisites + +- Linux system with `auditd` package installed and the audit daemon running (`systemctl status auditd`) +- Root or sudo access to configure audit rules and query logs +- Audit rules deployed via `/etc/audit/rules.d/*.rules` or loaded with `auditctl` +- Recommended: Neo23x0/auditd ruleset from GitHub for comprehensive baseline coverage +- Familiarity with Linux syscalls (`execve`, `open`, `connect`, `ptrace`, etc.) 
+- Log storage with sufficient retention (default location: `/var/log/audit/audit.log`) + +## Workflow + +### Step 1: Verify Audit Daemon Status and Configuration + +Confirm the audit system is running and check the current rule set: + +```bash +# Check auditd service status +systemctl status auditd + +# Show current audit rules loaded in the kernel +auditctl -l + +# Show audit daemon configuration +cat /etc/audit/auditd.conf | grep -E "log_file|max_log_file|num_logs|space_left_action" + +# Check if the audit backlog is being exceeded (dropped events) +auditctl -s +``` + +If the backlog limit is being reached, increase it: + +```bash +auditctl -b 8192 +``` + +### Step 2: Deploy Intrusion-Focused Audit Rules + +Add rules that target common intrusion indicators. Place these in `/etc/audit/rules.d/intrusion.rules`: + +```bash +# Monitor credential files for unauthorized reads or modifications +-w /etc/passwd -p wa -k credential_access +-w /etc/shadow -p rwa -k credential_access +-w /etc/gshadow -p rwa -k credential_access +-w /etc/sudoers -p wa -k privilege_escalation +-w /etc/sudoers.d/ -p wa -k privilege_escalation + +# Monitor SSH configuration and authorized keys +-w /etc/ssh/sshd_config -p wa -k sshd_config_change +-w /root/.ssh/authorized_keys -p wa -k ssh_key_tampering + +# Monitor user and group management commands +-w /usr/sbin/useradd -p x -k user_management +-w /usr/sbin/usermod -p x -k user_management +-w /usr/sbin/groupadd -p x -k user_management + +# Detect process injection via ptrace +-a always,exit -F arch=b64 -S ptrace -F a0=0x4 -k process_injection +-a always,exit -F arch=b64 -S ptrace -F a0=0x5 -k process_injection +-a always,exit -F arch=b64 -S ptrace -F a0=0x6 -k process_injection + +# Monitor execution of programs from unusual directories +-a always,exit -F arch=b64 -S execve -F exe=/tmp -k exec_from_tmp +-a always,exit -F arch=b64 -S execve -F exe=/dev/shm -k exec_from_shm + +# Detect kernel module loading (rootkit installation) +-a always,exit 
-F arch=b64 -S init_module -S finit_module -k kernel_module_load +-a always,exit -F arch=b64 -S delete_module -k kernel_module_remove +-w /sbin/insmod -p x -k kernel_module_tool +-w /sbin/modprobe -p x -k kernel_module_tool + +# Monitor network socket creation for reverse shells +-a always,exit -F arch=b64 -S socket -F a0=2 -k network_socket_created +-a always,exit -F arch=b64 -S connect -F a0=2 -k network_connection + +# Detect cron job modifications (persistence) +-w /etc/crontab -p wa -k cron_persistence +-w /etc/cron.d/ -p wa -k cron_persistence +-w /var/spool/cron/ -p wa -k cron_persistence + +# Monitor log deletion or tampering +-w /var/log/ -p wa -k log_tampering +``` + +Reload rules after editing: + +```bash +augenrules --load +auditctl -l | wc -l # Confirm rule count +``` + +### Step 3: Search for Intrusion Indicators with ausearch + +Use `ausearch` to query the audit log for specific events: + +```bash +# Search for all failed login attempts in the last 24 hours +ausearch -m USER_LOGIN --success no -ts recent + +# Search for commands executed by a specific user +ausearch -ua 1001 -m EXECVE -ts today + +# Search for all file access events on /etc/shadow +ausearch -f /etc/shadow -ts this-week + +# Search for privilege escalation via sudo +ausearch -m USER_CMD -ts today + +# Search for kernel module loading events +ausearch -k kernel_module_load -ts this-month + +# Search for processes executed from /tmp (common attack staging) +ausearch -k exec_from_tmp -ts this-week + +# Search for SSH key modifications +ausearch -k ssh_key_tampering -ts this-month + +# Search for a specific event by audit event ID +ausearch -a 12345 + +# Search events in a specific time range +ausearch -ts 03/15/2026 08:00:00 -te 03/15/2026 18:00:00 + +# Interpret syscall numbers and format output readably +ausearch -k credential_access -i -ts today +``` + +### Step 4: Generate Summary Reports with aureport + +Use `aureport` to produce aggregate summaries for triage: + +```bash +# Summary 
of all authentication events
+aureport -au -ts this-week --summary
+
+# Report of all failed events (login, access, etc.)
+aureport --failed --summary -ts today
+
+# Report of executable runs
+aureport -x --summary -ts today
+
+# Report of all anomaly events (segfaults, promiscuous mode, etc.)
+aureport --anomaly -ts this-week
+
+# Report of file access events
+aureport -f --summary -ts today
+
+# Report of all events by key (maps to your custom rule keys)
+aureport -k --summary -ts this-month
+
+# Report of all system calls
+aureport -s --summary -ts today
+
+# Report of events grouped by user
+aureport -u --summary -ts this-week
+
+# Detailed time-based event report for timeline building
+aureport -ts 03/15/2026 08:00:00 -te 03/15/2026 18:00:00 --summary
+```
+
+### Step 5: Reconstruct the Attack Timeline
+
+Combine ausearch queries to build a chronological narrative:
+
+```bash
+# Step 5a: Identify the initial access timestamp
+ausearch -m USER_LOGIN -ua 0 --success yes -ts this-week -i | head -50
+
+# Step 5b: Trace what the attacker did after gaining access
+# Get all events from the compromised account within the incident window
+ausearch -ua <UID> -ts "03/15/2026 14:00:00" -te "03/15/2026 18:00:00" -i \
+  | aureport -f -i
+
+# Step 5c: Extract all commands executed during the incident window
+ausearch -m EXECVE -ts "03/15/2026 14:00:00" -te "03/15/2026 18:00:00" -i
+
+# Step 5d: Check for persistence mechanisms installed
+ausearch -k cron_persistence -ts "03/15/2026 14:00:00" -i
+ausearch -k ssh_key_tampering -ts "03/15/2026 14:00:00" -i
+
+# Step 5e: Check for lateral movement (outbound connections)
+ausearch -k network_connection -ts "03/15/2026 14:00:00" -i
+```
+
+### Step 6: Forward Audit Logs to SIEM
+
+Configure `audisp-remote` or `auditbeat` to ship logs to a central SIEM for correlation:
+
+```bash
+# Option A: Using audisp-remote plugin
+# Edit /etc/audit/plugins.d/au-remote.conf
+active = yes
+direction = out
+path = /sbin/audisp-remote
+type = always
+ 
+# Configure remote target in /etc/audit/audisp-remote.conf
+remote_server = siem.internal.corp
+port = 6514
+transport = tcp
+
+# Option B: Using Elastic Auditbeat
+# Install auditbeat and configure /etc/auditbeat/auditbeat.yml
+# Auditbeat reads directly from the kernel audit framework
+```
+
+## Key Concepts
+
+| Term | Definition |
+|------|------------|
+| **auditd** | The Linux Audit daemon that receives audit events from the kernel and writes them to `/var/log/audit/audit.log` |
+| **auditctl** | Command-line utility to control the audit system: add/remove rules, check status, set backlog size |
+| **ausearch** | Query tool that searches audit logs by message type, user, file, key, time range, or event ID |
+| **aureport** | Reporting tool that generates aggregate summaries of audit events for triage and compliance |
+| **audit rule key (-k)** | A user-defined label attached to an audit rule, enabling fast filtering of related events with ausearch and aureport |
+| **syscall auditing** | Kernel-level monitoring of system calls (execve, open, connect, ptrace) that captures process and file activity |
+| **augenrules** | Utility that merges all files in `/etc/audit/rules.d/` into `/etc/audit/audit.rules` and loads them into the kernel |
+
+## Verification
+
+- [ ] auditd is running and rules are loaded (`auditctl -l` returns expected rule count)
+- [ ] No audit backlog overflow (`auditctl -s` shows `backlog: 0` or low value, lost: 0)
+- [ ] ausearch returns events for each custom key (`ausearch -k <key> -ts today` returns results)
+- [ ] aureport generates non-empty summaries for authentication, executable, and file events
+- [ ] Timeline reconstruction produces a coherent chronological sequence of attacker actions
+- [ ] Critical file watches trigger alerts on test modifications (`touch /etc/shadow` generates an event)
+- [ ] Logs are forwarding to central SIEM (verify with a test event and confirm receipt)
+- [ ] Audit rules persist across reboot (rules in 
`/etc/audit/rules.d/`, not only via `auditctl`) diff --git a/personas/_shared/skills/analyzing-linux-audit-logs-for-intrusion/references/api-reference.md b/personas/_shared/skills/analyzing-linux-audit-logs-for-intrusion/references/api-reference.md new file mode 100644 index 0000000..ea9f41a --- /dev/null +++ b/personas/_shared/skills/analyzing-linux-audit-logs-for-intrusion/references/api-reference.md @@ -0,0 +1,89 @@ +# API Reference: Analyzing Linux Audit Logs for Intrusion + +## Audit Log Location +``` +/var/log/audit/audit.log +``` + +## ausearch CLI +```bash +# Search by key +ausearch -k file_access + +# Search by message type +ausearch -m EXECVE + +# Failed events only +ausearch --success no + +# By user +ausearch -ua 1000 + +# CSV output for Python processing +ausearch --format csv > audit_events.csv + +# By time range +ausearch --start today --end now +ausearch --start 01/15/2025 00:00:00 --end 01/16/2025 00:00:00 +``` + +## aureport CLI +```bash +# Summary report +aureport --summary + +# Authentication report +aureport -au + +# Failed events +aureport --failed + +# Executable report +aureport -x + +# File access report +aureport -f + +# Anomaly report +aureport --anomaly +``` + +## Audit Rules (auditctl) +```bash +# Monitor sensitive files +auditctl -w /etc/passwd -p rwxa -k passwd_access +auditctl -w /etc/shadow -p rwxa -k shadow_access +auditctl -w /etc/sudoers -p rwxa -k sudoers_access + +# Monitor privilege escalation +auditctl -a always,exit -F arch=b64 -S execve -F euid=0 -F uid!=0 -k priv_esc + +# Monitor module loading +auditctl -a always,exit -F arch=b64 -S init_module -S finit_module -k modules + +# Monitor network connections +auditctl -a always,exit -F arch=b64 -S connect -k network_connect +``` + +## Audit Log Fields +| Field | Description | +|-------|------------| +| type | Event type (SYSCALL, PATH, EXECVE, USER_CMD) | +| msg | audit(timestamp:event_id) | +| syscall | System call number | +| uid/euid | User ID / Effective UID | +| comm | 
Command name | +| exe | Executable path | +| key | Audit rule key | +| success | yes/no | +| name | File path (in PATH records) | + +## Suspicious Syscalls +| Syscall | Concern | +|---------|---------| +| execve | Program execution | +| ptrace | Process debugging/injection | +| init_module | Kernel rootkit loading | +| connect | Outbound connection | +| setuid | Privilege change | +| open_by_handle_at | Container escape | diff --git a/personas/_shared/skills/analyzing-linux-audit-logs-for-intrusion/scripts/agent.py b/personas/_shared/skills/analyzing-linux-audit-logs-for-intrusion/scripts/agent.py new file mode 100644 index 0000000..7727e70 --- /dev/null +++ b/personas/_shared/skills/analyzing-linux-audit-logs-for-intrusion/scripts/agent.py @@ -0,0 +1,221 @@ +#!/usr/bin/env python3 +"""Linux audit log analysis agent for intrusion detection. + +Parses /var/log/audit/audit.log entries to detect privilege escalation, +unauthorized file access, suspicious syscalls, and process execution anomalies. 
+""" + +import argparse +import json +import re +import sys +import datetime +import collections +import subprocess + + +SUSPICIOUS_SYSCALLS = { + "execve": "Program execution", + "connect": "Network connection", + "bind": "Port binding", + "ptrace": "Process tracing/debugging", + "init_module": "Kernel module loading", + "finit_module": "Kernel module loading", + "delete_module": "Kernel module unloading", + "mount": "Filesystem mount", + "umount2": "Filesystem unmount", + "setuid": "UID change", + "setgid": "GID change", + "sethostname": "Hostname change", + "open_by_handle_at": "File open by handle (container escape)", +} + +SENSITIVE_PATHS = [ + "/etc/passwd", "/etc/shadow", "/etc/sudoers", + "/etc/ssh/sshd_config", "/root/.ssh/authorized_keys", + "/etc/crontab", "/var/spool/cron", +] + +SUSPICIOUS_COMMANDS = [ + "curl", "wget", "nc", "ncat", "nmap", "tcpdump", + "python", "perl", "ruby", "gcc", "cc", "make", + "useradd", "usermod", "groupadd", "visudo", + "iptables", "ip6tables", "nft", +] + + +def parse_audit_log(log_path, max_lines=50000): + """Parse raw audit.log file into structured events.""" + events = [] + current = {} + try: + with open(log_path, "r") as f: + for i, line in enumerate(f): + if i >= max_lines: + break + match = re.match( + r"type=(\S+)\s+msg=audit\((\d+\.\d+):(\d+)\):\s*(.*)", line + ) + if not match: + continue + event_type = match.group(1) + timestamp = float(match.group(2)) + event_id = match.group(3) + data_str = match.group(4) + fields = dict(re.findall(r'(\w+)=("[^"]*"|\S+)', data_str)) + for k, v in fields.items(): + fields[k] = v.strip('"') + event = { + "type": event_type, + "timestamp": datetime.datetime.fromtimestamp(timestamp).isoformat(), + "event_id": event_id, + **fields, + } + events.append(event) + except FileNotFoundError: + return {"error": f"Log file not found: {log_path}"} + return events + + +def detect_privilege_escalation(events): + """Detect privilege escalation indicators in audit events.""" + findings = [] + 
for e in events: + if e.get("type") == "SYSCALL" and e.get("syscall_name") in ("setuid", "setgid", "execve"): + if e.get("uid") != "0" and e.get("euid") == "0": + findings.append({ + "type": "privilege_escalation", + "detail": f"UID {e.get('uid')} escalated to eUID 0", + "command": e.get("comm", ""), + "exe": e.get("exe", ""), + "timestamp": e.get("timestamp"), + "severity": "CRITICAL", + }) + if e.get("type") == "USER_CMD" and "sudo" in e.get("cmd", "").lower(): + findings.append({ + "type": "sudo_usage", + "user": e.get("acct", e.get("uid", "")), + "command": e.get("cmd", ""), + "timestamp": e.get("timestamp"), + "severity": "MEDIUM", + }) + return findings + + +def detect_file_access(events): + """Detect access to sensitive files.""" + findings = [] + for e in events: + if e.get("type") in ("PATH", "SYSCALL"): + path = e.get("name", e.get("exe", "")) + for sensitive in SENSITIVE_PATHS: + if sensitive in path: + findings.append({ + "type": "sensitive_file_access", + "path": path, + "syscall": e.get("syscall_name", e.get("syscall", "")), + "user": e.get("uid", ""), + "timestamp": e.get("timestamp"), + "severity": "HIGH", + }) + break + return findings + + +def detect_suspicious_commands(events): + """Detect execution of suspicious commands.""" + findings = [] + for e in events: + if e.get("type") in ("EXECVE", "SYSCALL"): + comm = e.get("comm", "").lower() + exe = e.get("exe", "").lower() + for cmd in SUSPICIOUS_COMMANDS: + if cmd in comm or cmd in exe: + findings.append({ + "type": "suspicious_command", + "command": comm, + "exe": exe, + "user": e.get("uid", ""), + "timestamp": e.get("timestamp"), + "severity": "MEDIUM", + }) + break + return findings + + +def run_ausearch(key=None, message_type=None, success=None): + """Run ausearch command and return results.""" + cmd = ["ausearch"] + if key: + cmd.extend(["-k", key]) + if message_type: + cmd.extend(["-m", message_type]) + if success is not None: + cmd.extend(["--success", "yes" if success else "no"]) + 
cmd.extend(["--format", "csv"]) + try: + result = subprocess.run(cmd, capture_output=True, text=True, timeout=30) + return {"output": result.stdout[:5000], "exit_code": result.returncode} + except (FileNotFoundError, subprocess.TimeoutExpired) as e: + return {"error": str(e)} + + +def generate_summary(events, findings): + """Generate audit log analysis summary.""" + event_types = collections.Counter(e.get("type") for e in events) + finding_types = collections.Counter(f.get("type") for f in findings) + severity_counts = collections.Counter(f.get("severity") for f in findings) + return { + "total_events": len(events), + "event_types": dict(event_types.most_common(10)), + "total_findings": len(findings), + "finding_types": dict(finding_types), + "by_severity": dict(severity_counts), + } + + +def main(): + parser = argparse.ArgumentParser(description="Linux audit log intrusion detection agent") + parser.add_argument("log_file", nargs="?", default="/var/log/audit/audit.log", + help="Path to audit.log (default: /var/log/audit/audit.log)") + parser.add_argument("--max-lines", type=int, default=50000, help="Max log lines to parse") + parser.add_argument("--ausearch-key", help="Run ausearch with this key") + parser.add_argument("--output", "-o", help="Output JSON report path") + args = parser.parse_args() + + print("[*] Linux Audit Log Intrusion Detection Agent") + + if args.ausearch_key: + result = run_ausearch(key=args.ausearch_key) + print(json.dumps(result, indent=2)) + sys.exit(0) + + events = parse_audit_log(args.log_file, args.max_lines) + if isinstance(events, dict) and "error" in events: + print(f"[!] 
{events['error']}") + print("[DEMO] Specify a valid audit.log path or run on a Linux system") + print(json.dumps({"demo": True, "monitored_syscalls": len(SUSPICIOUS_SYSCALLS)}, indent=2)) + sys.exit(0) + + findings = [] + findings.extend(detect_privilege_escalation(events)) + findings.extend(detect_file_access(events)) + findings.extend(detect_suspicious_commands(events)) + + summary = generate_summary(events, findings) + print(f"[*] Events parsed: {summary['total_events']}") + print(f"[*] Findings: {summary['total_findings']}") + print(f" By severity: {summary['by_severity']}") + for f in findings[:15]: + print(f" [{f['severity']}] {f['type']}: {f.get('command', f.get('path', ''))}") + + if args.output: + report = {"summary": summary, "findings": findings} + with open(args.output, "w") as f: + json.dump(report, f, indent=2, default=str) + + print(json.dumps(summary, indent=2)) + + +if __name__ == "__main__": + main() diff --git a/personas/_shared/skills/analyzing-linux-elf-malware/LICENSE b/personas/_shared/skills/analyzing-linux-elf-malware/LICENSE new file mode 100644 index 0000000..d885118 --- /dev/null +++ b/personas/_shared/skills/analyzing-linux-elf-malware/LICENSE @@ -0,0 +1,201 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to the Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by the Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding any notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. Please do not remove or change + the license header comment from a contributed file except when + necessary. 
+ + Copyright 2026 mukul975 + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/personas/_shared/skills/analyzing-linux-elf-malware/SKILL.md b/personas/_shared/skills/analyzing-linux-elf-malware/SKILL.md new file mode 100644 index 0000000..66f0f45 --- /dev/null +++ b/personas/_shared/skills/analyzing-linux-elf-malware/SKILL.md @@ -0,0 +1,341 @@ +--- +name: analyzing-linux-elf-malware +description: 'Analyzes malicious Linux ELF (Executable and Linkable Format) binaries including botnets, cryptominers, ransomware, + and rootkits targeting Linux servers, containers, and cloud infrastructure. Covers static analysis, dynamic tracing, and + reverse engineering of x86_64 and ARM ELF samples. Activates for requests involving Linux malware analysis, ELF binary investigation, + Linux server compromise assessment, or container malware analysis. 
+ + ' +domain: cybersecurity +subdomain: malware-analysis +tags: +- malware +- Linux +- ELF +- reverse-engineering +- server-malware +version: 1.0.0 +author: mahipal +license: Apache-2.0 +nist_csf: +- DE.AE-02 +- RS.AN-03 +- ID.RA-01 +- DE.CM-01 +--- + +# Analyzing Linux ELF Malware + +## When to Use + +- A Linux server or container has been compromised and suspicious ELF binaries are found +- Analyzing Linux botnets (Mirai, Gafgyt, XorDDoS), cryptominers, or ransomware +- Investigating malware targeting cloud infrastructure, Docker containers, or Kubernetes pods +- Reverse engineering Linux rootkits and kernel modules +- Analyzing cross-platform malware compiled for Linux x86_64, ARM, or MIPS architectures + +**Do not use** for Windows PE binary analysis; use PEStudio, Ghidra, or IDA for Windows malware. + +## Prerequisites + +- Ghidra or IDA with Linux ELF support for disassembly and decompilation +- Linux analysis VM (Ubuntu 22.04 recommended) with development tools installed +- strace, ltrace, and GDB for dynamic analysis and debugging +- readelf, objdump, and nm from GNU binutils for static inspection +- Radare2 for quick binary triage and scripted analysis +- Docker for isolated container-based malware execution + +## Workflow + +### Step 1: Identify ELF Binary Properties + +Examine the ELF header and basic properties: + +```bash +# File type identification +file suspect_binary + +# Detailed ELF header analysis +readelf -h suspect_binary + +# Section headers +readelf -S suspect_binary + +# Program headers (segments) +readelf -l suspect_binary + +# Symbol table (if not stripped) +readelf -s suspect_binary +nm suspect_binary 2>/dev/null + +# Dynamic linking information +readelf -d suspect_binary +ldd suspect_binary 2>/dev/null # Only on matching architecture! 
+ +# Compute hashes +md5sum suspect_binary +sha256sum suspect_binary + +# Check for packing/UPX +upx -t suspect_binary +``` + +```python +# Python-based ELF analysis +from elftools.elf.elffile import ELFFile +import hashlib + +with open("suspect_binary", "rb") as f: + data = f.read() + sha256 = hashlib.sha256(data).hexdigest() + +with open("suspect_binary", "rb") as f: + elf = ELFFile(f) + + print(f"SHA-256: {sha256}") + print(f"Class: {elf.elfclass}-bit") + print(f"Endian: {elf.little_endian and 'Little' or 'Big'}") + print(f"Machine: {elf.header.e_machine}") + print(f"Type: {elf.header.e_type}") + print(f"Entry Point: 0x{elf.header.e_entry:X}") + + # Check if stripped + symtab = elf.get_section_by_name('.symtab') + print(f"Stripped: {'Yes' if symtab is None else 'No'}") + + # Section entropy analysis + import math + from collections import Counter + for section in elf.iter_sections(): + data = section.data() + if len(data) > 0: + entropy = -sum((c/len(data)) * math.log2(c/len(data)) + for c in Counter(data).values() if c > 0) + if entropy > 7.0: + print(f" [!] 
High entropy section: {section.name} ({entropy:.2f})") +``` + +### Step 2: Extract Strings and Indicators + +Search for embedded IOCs and functionality clues: + +```bash +# ASCII strings +strings suspect_binary > strings_output.txt + +# Search for network indicators +grep -iE "(http|https|ftp)://" strings_output.txt +grep -iE "([0-9]{1,3}\.){3}[0-9]{1,3}" strings_output.txt +grep -iE "[a-zA-Z0-9.-]+\.(com|net|org|io|ru|cn)" strings_output.txt + +# Search for shell commands +grep -iE "(bash|sh|wget|curl|chmod|/tmp/|/dev/)" strings_output.txt + +# Search for crypto mining indicators +grep -iE "(stratum|xmr|monero|pool\.|mining)" strings_output.txt + +# Search for SSH/credential theft +grep -iE "(ssh|authorized_keys|id_rsa|shadow|passwd)" strings_output.txt + +# Search for persistence mechanisms +grep -iE "(crontab|systemd|init\.d|rc\.local|ld\.so\.preload)" strings_output.txt + +# FLOSS for obfuscated strings (if available) +floss suspect_binary +``` + +### Step 3: Analyze System Calls and Library Usage + +Identify what system calls and libraries the malware uses: + +```bash +# List imported functions (dynamically linked) +readelf -r suspect_binary | grep -E "socket|connect|exec|fork|open|write|bind|listen" + +# Trace system calls during execution (in isolated VM only) +strace -f -e trace=network,process,file -o strace_output.txt ./suspect_binary + +# Trace library calls +ltrace -f -o ltrace_output.txt ./suspect_binary + +# Key system calls to watch: +# Network: socket, connect, bind, listen, accept, sendto, recvfrom +# Process: fork, execve, clone, kill, ptrace +# File: open, read, write, unlink, rename, chmod +# Persistence: inotify_add_watch (file monitoring) +``` + +### Step 4: Dynamic Analysis with GDB + +Debug the malware to observe runtime behavior: + +```bash +# Start GDB with the binary +gdb ./suspect_binary + +# Set breakpoints on key functions +(gdb) break main +(gdb) break socket +(gdb) break connect +(gdb) break execve +(gdb) break fork + +# Run and 
analyze +(gdb) run +(gdb) info registers # View register state +(gdb) x/20s $rdi # Examine string argument +(gdb) bt # Backtrace +(gdb) continue + +# For stripped binaries, break on entry point +(gdb) break *0x400580 # Entry point from readelf +(gdb) run + +# Monitor network connections during execution +# In another terminal: +ss -tlnp # List listening sockets +ss -tnp # List established connections +``` + +### Step 5: Reverse Engineer with Ghidra + +Perform deep code analysis on the ELF binary: + +``` +Ghidra Analysis for Linux ELF: +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +1. Import: File -> Import -> Select ELF binary + - Ghidra auto-detects ELF format and architecture + - Accept default analysis options + +2. Key analysis targets: + - main() function (or entry point if stripped) + - Socket creation and connection functions + - Command dispatch logic (switch/case on received data) + - Encryption/encoding routines + - Persistence installation code + - Self-propagation/scanning functions + +3. For Mirai-like botnets, look for: + - Credential list for brute-forcing (telnet/SSH) + - Attack module selection (UDP flood, SYN flood, ACK flood) + - Scanner module (port scanning for vulnerable devices) + - Killer module (killing competing botnets) + +4. 
For cryptominers, look for: + - Mining pool connection (stratum protocol) + - Wallet address strings + - CPU/GPU utilization functions + - Process hiding techniques +``` + +### Step 6: Analyze Linux-Specific Persistence + +Check for persistence mechanisms: + +```bash +# Check for LD_PRELOAD rootkit +strings suspect_binary | grep "ld.so.preload" +# Malware writing to /etc/ld.so.preload can hook all dynamic library calls + +# Check for crontab persistence +strings suspect_binary | grep -i "cron" + +# Check for systemd service creation +strings suspect_binary | grep -iE "systemd|\.service|systemctl" + +# Check for init script creation +strings suspect_binary | grep -iE "init\.d|rc\.local|update-rc" + +# Check for SSH key injection +strings suspect_binary | grep -i "authorized_keys" + +# Check for kernel module (rootkit) loading +strings suspect_binary | grep -iE "insmod|modprobe|init_module" + +# Check for process hiding +strings suspect_binary | grep -iE "proc|readdir|getdents" +``` + +## Key Concepts + +| Term | Definition | +|------|------------| +| **ELF (Executable and Linkable Format)** | Standard binary format for Linux executables, shared libraries, and core dumps containing headers, sections, and segments | +| **Stripped Binary** | ELF binary with debug symbols removed, making reverse engineering more difficult as function names are lost | +| **LD_PRELOAD** | Linux environment variable specifying shared libraries to load before all others; abused by rootkits to intercept system library calls | +| **strace** | Linux system call tracer that logs all system calls and signals made by a process, revealing file, network, and process operations | +| **GOT/PLT** | Global Offset Table and Procedure Linkage Table; ELF structures for dynamic linking that can be hijacked for function hooking | +| **Statically Linked** | Binary compiled with all library code included; common in IoT malware to run on systems without matching shared libraries | +| **Mirai** | Prolific Linux 
botnet targeting IoT devices via telnet brute-force; source code leaked, leading to many variants | + +## Tools & Systems + +- **Ghidra**: NSA reverse engineering tool with full ELF support for x86, x86_64, ARM, MIPS, and other Linux architectures +- **Radare2**: Open-source reverse engineering framework with command-line interface for quick binary analysis and scripting +- **strace**: Linux system call tracing tool for observing binary behavior including file, network, and process operations +- **GDB**: GNU Debugger for setting breakpoints, examining memory, and stepping through Linux binary execution +- **pyelftools**: Python library for parsing ELF files programmatically for automated analysis pipelines + +## Common Scenarios + +### Scenario: Analyzing a Cryptominer Found on a Compromised Linux Server + +**Context**: A cloud server shows 100% CPU usage. Investigation reveals an unknown binary running from /tmp with a suspicious name. The binary needs analysis to confirm it is a cryptominer and identify the attacker's wallet and pool. + +**Approach**: +1. Copy the binary to an analysis VM and compute SHA-256 hash +2. Run `file` and `readelf` to identify architecture and linking type +3. Extract strings and search for mining pool addresses (stratum+tcp://) and wallet addresses +4. Run with strace in a sandbox to observe network connections (mining pool connection) +5. Import into Ghidra to identify the mining algorithm and configuration extraction +6. Check for persistence mechanisms (crontab, systemd service, SSH keys) +7. 
Document all IOCs including pool address, wallet, C2 for updates, and persistence artifacts + +**Pitfalls**: +- Running `ldd` on malware outside a sandbox (ldd can execute code in the binary) +- Not checking for ARM/MIPS architecture before attempting x86_64 execution +- Missing companion scripts (.sh files) that may handle persistence and cleanup +- Ignoring the initial access vector (how the miner was deployed: SSH brute force, web exploit, container escape) + +## Output Format + +``` +LINUX ELF MALWARE ANALYSIS REPORT +==================================== +File: /tmp/.X11-unix/.rsync +SHA-256: e3b0c44298fc1c149afbf4c8996fb924... +Type: ELF 64-bit LSB executable, x86-64 +Linking: Statically linked (all libraries embedded) +Stripped: Yes +Size: 2,847,232 bytes +Packer: UPX 3.96 (unpacked for analysis) + +CLASSIFICATION +Family: XMRig Cryptominer (modified) +Variant: Custom build with C2 update mechanism + +FUNCTIONALITY +[*] XMR (Monero) mining via RandomX algorithm +[*] Stratum pool connection for work submission +[*] C2 check-in for configuration updates +[*] Process name masquerading (argv[0] = "[kworker/0:0]") +[*] Competitor process killing (kills other miners) +[*] SSH key injection for re-access + +NETWORK INDICATORS +Mining Pool: stratum+tcp://pool.minexmr[.]com:4444 +C2 Server: hxxp://update.malicious[.]com/config +Wallet: 49jZ5Q3b...Monero_Wallet_Address... 
+ +PERSISTENCE +[1] Crontab entry: */5 * * * * /tmp/.X11-unix/.rsync +[2] SSH key added to /root/.ssh/authorized_keys +[3] Systemd service: /etc/systemd/system/rsync-daemon.service +[4] Modified /etc/ld.so.preload for process hiding + +PROCESS HIDING +LD_PRELOAD: /usr/lib/.libsystem.so +Hook: readdir() to hide /tmp/.X11-unix/.rsync from ls +Hook: fopen() to hide from /proc/*/maps reading +``` diff --git a/personas/_shared/skills/analyzing-linux-elf-malware/references/api-reference.md b/personas/_shared/skills/analyzing-linux-elf-malware/references/api-reference.md new file mode 100644 index 0000000..126d04b --- /dev/null +++ b/personas/_shared/skills/analyzing-linux-elf-malware/references/api-reference.md @@ -0,0 +1,119 @@ +# API Reference: Linux ELF Malware Analysis Tools + +## readelf - ELF Binary Inspection + +### Syntax +```bash +readelf -h # ELF header +readelf -S # Section headers +readelf -l # Program headers (segments) +readelf -s # Symbol table +readelf -d # Dynamic section +readelf -r # Relocation entries +readelf -n # Notes section +``` + +### Key ELF Header Fields +| Field | Description | +|-------|-------------| +| `Class` | 32-bit or 64-bit | +| `Machine` | Architecture (x86-64, ARM, MIPS) | +| `Type` | EXEC (executable), DYN (shared object) | +| `Entry point` | Code execution start address | + +## pyelftools - Python ELF Parsing + +### Usage +```python +from elftools.elf.elffile import ELFFile + +with open("binary", "rb") as f: + elf = ELFFile(f) + elf.elfclass # 32 or 64 + elf.little_endian # True/False + elf.header.e_machine # Architecture + elf.header.e_entry # Entry point + elf.num_sections() # Section count + elf.get_section_by_name(".symtab") # Symbol table +``` + +## strings - String Extraction + +### Syntax +```bash +strings # ASCII strings (default min 4) +strings -n 8 # Minimum 8 characters +strings -e l # 16-bit little-endian (Unicode) +strings -t x # Print offset in hex +``` + +## strace - System Call Tracing + +### Syntax +```bash 
+strace -f ./binary # Follow forks +strace -e trace=network ./binary # Network calls only +strace -e trace=file ./binary # File operations only +strace -e trace=process ./binary # Process operations +strace -o output.txt ./binary # Log to file +strace -c ./binary # Summary statistics +``` + +### Key System Calls +| Call | Category | +|------|----------| +| `socket`, `connect`, `bind` | Network | +| `fork`, `execve`, `clone` | Process | +| `open`, `read`, `write`, `unlink` | File I/O | +| `ptrace` | Anti-debug/injection | + +## ltrace - Library Call Tracing + +### Syntax +```bash +ltrace -f ./binary # Follow child processes +ltrace -e malloc+free ./binary # Specific functions +ltrace -o output.txt ./binary # Log to file +``` + +## GDB - GNU Debugger + +### Syntax +```bash +gdb ./binary +(gdb) break main +(gdb) break *0x400580 # Break at address +(gdb) run +(gdb) info registers +(gdb) x/20s $rdi # Examine string at RDI +(gdb) x/10i $rip # Disassemble at RIP +(gdb) bt # Backtrace +``` + +## UPX - Packer Detection/Unpacking + +### Syntax +```bash +upx -t # Test if packed +upx -d # Decompress/unpack +upx -l # List compression details +``` + +## objdump - Disassembly + +### Syntax +```bash +objdump -d # Disassemble .text +objdump -D # Disassemble all sections +objdump -M intel -d # Intel syntax +objdump -t # Symbol table +``` + +## nm - Symbol Listing + +### Syntax +```bash +nm # List symbols +nm -D # Dynamic symbols only +nm -u # Undefined (imported) symbols +``` diff --git a/personas/_shared/skills/analyzing-linux-elf-malware/scripts/agent.py b/personas/_shared/skills/analyzing-linux-elf-malware/scripts/agent.py new file mode 100644 index 0000000..d3455e4 --- /dev/null +++ b/personas/_shared/skills/analyzing-linux-elf-malware/scripts/agent.py @@ -0,0 +1,230 @@ +#!/usr/bin/env python3 +"""Linux ELF malware static analysis agent using pyelftools and binary inspection.""" + +import hashlib +import math +import os +import sys +import subprocess +from collections import 
import re
from collections import Counter

# pyelftools is optional: ELF header/section analysis degrades gracefully
# (returns an error marker / empty list) when it is not installed.
try:
    from elftools.elf.elffile import ELFFile
    HAS_ELFTOOLS = True
except ImportError:
    HAS_ELFTOOLS = False

# Classifier patterns used by extract_strings(), compiled once at import time
# (previously `re` was imported and the patterns recompiled inside the
# per-string loop).
_IP_RE = re.compile(r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}")
_DOMAIN_RE = re.compile(r"[a-zA-Z0-9.-]+\.(com|net|org|io|ru|cn|xyz)")


def compute_hashes(filepath):
    """Compute MD5, SHA-1, and SHA-256 hashes of a file.

    Reads in 64 KiB chunks so arbitrarily large samples do not need to
    fit in memory. Returns a dict with keys "md5", "sha1", "sha256".
    """
    md5 = hashlib.md5()
    sha1 = hashlib.sha1()
    sha256 = hashlib.sha256()
    with open(filepath, "rb") as f:
        for chunk in iter(lambda: f.read(65536), b""):
            md5.update(chunk)
            sha1.update(chunk)
            sha256.update(chunk)
    return {"md5": md5.hexdigest(), "sha1": sha1.hexdigest(), "sha256": sha256.hexdigest()}


def calculate_entropy(data):
    """Return the Shannon entropy (bits per byte) of *data*; 0.0 for empty input.

    Values near 8.0 indicate random-looking content (packing/encryption).
    """
    if not data:
        return 0.0
    counter = Counter(data)
    length = len(data)
    return -sum((c / length) * math.log2(c / length) for c in counter.values())


def analyze_elf_header(filepath):
    """Parse the ELF header and extract key properties.

    Returns a dict of header facts, or {"error": ...} when pyelftools is
    not available. "stripped" is inferred from the absence of .symtab.
    """
    if not HAS_ELFTOOLS:
        return {"error": "pyelftools not installed: pip install pyelftools"}
    with open(filepath, "rb") as f:
        elf = ELFFile(f)
        symtab = elf.get_section_by_name(".symtab")
        info = {
            "class": f"{elf.elfclass}-bit",
            "endian": "Little" if elf.little_endian else "Big",
            "machine": elf.header.e_machine,
            "type": elf.header.e_type,
            "entry_point": f"0x{elf.header.e_entry:X}",
            "stripped": symtab is None,
            "num_sections": elf.num_sections(),
            "num_segments": elf.num_segments(),
        }
    return info


def analyze_sections(filepath):
    """Analyze ELF sections for entropy and suspicious characteristics.

    Returns a list of per-section dicts; entropy > 7.0 is flagged as
    "high_entropy" (possible packing/encryption). Empty sections are skipped.
    """
    if not HAS_ELFTOOLS:
        return []
    sections = []
    with open(filepath, "rb") as f:
        elf = ELFFile(f)
        for section in elf.iter_sections():
            data = section.data()
            if len(data) == 0:
                continue
            entropy = calculate_entropy(data)
            sections.append({
                "name": section.name,
                "type": section["sh_type"],
                "size": len(data),
                "entropy": round(entropy, 4),
                "high_entropy": entropy > 7.0,
                "flags": section["sh_flags"],
            })
    return sections


def extract_strings(filepath, min_length=6):
    """Extract ASCII strings from the binary via `strings` and categorize them.

    Returns {} when the `strings` tool is missing, times out, or yields no
    output; otherwise a dict of categorized string lists plus a "total" count.
    """
    try:
        proc = subprocess.run(
            ["strings", "-n", str(min_length), filepath],
            capture_output=True, text=True, timeout=120,
        )
    except (FileNotFoundError, subprocess.TimeoutExpired):
        # A missing binutils install or a hung tool should not abort analysis.
        return {}
    stdout = proc.stdout
    if not stdout:
        return {}
    all_strings = stdout.strip().splitlines()
    categorized = {
        "urls": [], "ips": [], "domains": [], "shell_commands": [],
        "crypto_mining": [], "persistence": [], "ssh_related": [],
        "total": len(all_strings),
    }
    for s in all_strings:
        s_lower = s.lower()
        if any(proto in s_lower for proto in ["http://", "https://", "ftp://"]):
            categorized["urls"].append(s)
        if any(p in s_lower for p in ["stratum", "xmr", "monero", "pool.", "mining"]):
            categorized["crypto_mining"].append(s)
        if any(p in s_lower for p in ["crontab", "systemd", "init.d", "rc.local",
                                      "ld.so.preload", "systemctl"]):
            categorized["persistence"].append(s)
        if any(p in s_lower for p in ["ssh", "authorized_keys", "id_rsa", "shadow", "passwd"]):
            categorized["ssh_related"].append(s)
        if any(p in s_lower for p in ["bash", "wget", "curl", "chmod", "/tmp/", "/dev/"]):
            categorized["shell_commands"].append(s)
        if _IP_RE.match(s):
            categorized["ips"].append(s)
        if _DOMAIN_RE.match(s):
            categorized["domains"].append(s)
    return categorized


def check_packing(filepath):
    """Check whether the binary is packed with UPX.

    Looks for UPX magic bytes in the first 4 KiB, then (best-effort)
    confirms with `upx -t`. Returns a list of human-readable indicators.
    """
    with open(filepath, "rb") as f:
        data = f.read(4096)
    indicators = []
    if b"UPX!" in data:
        indicators.append("UPX packer detected (UPX! magic)")
    if b"UPX0" in data or b"UPX1" in data:
        indicators.append("UPX section names found")
    # NOTE: the original passed capture_output=True together with
    # stderr=subprocess.STDOUT, which raises ValueError on every call.
    # Use explicit pipes to merge stderr into stdout instead.
    try:
        proc = subprocess.run(
            ["upx", "-t", filepath],
            stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
            text=True, timeout=120,
        )
    except (FileNotFoundError, subprocess.TimeoutExpired):
        # upx not installed (or hung): fall back to the magic-byte result.
        return indicators
    if proc.stdout and "packed" in proc.stdout.lower():
        indicators.append("UPX verification confirms packing")
    return indicators


def analyze_dynamic_linking(filepath):
    """Analyze dynamic linking info and flag suspicious imported functions.

    Uses `readelf -d` for NEEDED libraries and RPATH/RUNPATH, and
    `readelf -r` for relocations referencing network/process primitives.
    Returns partial results if readelf is unavailable or times out.
    """
    dynamic_info = {"libraries": [], "rpath": None, "suspicious_imports": []}
    try:
        dyn = subprocess.run(["readelf", "-d", filepath],
                             capture_output=True, text=True, timeout=120)
    except (FileNotFoundError, subprocess.TimeoutExpired):
        return dynamic_info
    for line in (dyn.stdout or "").splitlines():
        if "NEEDED" in line:
            lib = line.split("[")[-1].rstrip("]") if "[" in line else ""
            dynamic_info["libraries"].append(lib)
        if "RPATH" in line or "RUNPATH" in line:
            dynamic_info["rpath"] = line.split("[")[-1].rstrip("]")

    try:
        reloc = subprocess.run(["readelf", "-r", filepath],
                               capture_output=True, text=True, timeout=120)
    except (FileNotFoundError, subprocess.TimeoutExpired):
        return dynamic_info
    suspicious_funcs = re.compile(
        r"socket|connect|exec|fork|open|write|bind|listen|send|recv")
    dynamic_info["suspicious_imports"] = [
        line.strip() for line in (reloc.stdout or "").splitlines()
        if line.strip() and suspicious_funcs.search(line)
    ]
    return dynamic_info


def detect_malware_type(strings_data):
    """Classify malware type from categorized strings (extract_strings output).

    Returns a list of matching family labels, or ["Unknown"] when nothing
    matches. Multiple labels may apply to one sample.
    """
    classifications = []
    if strings_data.get("crypto_mining"):
        classifications.append("Cryptominer")
    if any("flood" in s.lower() or "ddos" in s.lower()
           for s in strings_data.get("shell_commands", [])):
        classifications.append("DDoS Botnet")
    if strings_data.get("ssh_related") and strings_data.get("persistence"):
        classifications.append("Backdoor/Trojan")
    if any("insmod" in s or "modprobe" in s or "init_module" in s
           for s in strings_data.get("shell_commands", [])):
        classifications.append("Rootkit")
    if any("ransom" in s.lower() or "encrypt" in s.lower() or "bitcoin" in s.lower()
           for cat in strings_data.values() if isinstance(cat, list) for s in cat):
        classifications.append("Ransomware")
    return classifications or ["Unknown"]


if __name__ == "__main__":
    print("=" * 60)
    print("Linux ELF Malware Analysis Agent")
    print("Static analysis with pyelftools, strings, readelf")
    print("=" * 60)

    target = sys.argv[1] if len(sys.argv) > 1 else None

    if target and os.path.exists(target):
        print(f"\n[*] Analyzing: {target}")
        print(f"[*] Size: {os.path.getsize(target)} bytes")

        hashes = compute_hashes(target)
        print(f"[*] MD5: {hashes['md5']}")
        print(f"[*] SHA256: {hashes['sha256']}")

        elf_info = analyze_elf_header(target)
        print("\n--- ELF Header ---")
        for k, v in elf_info.items():
            print(f"  {k}: {v}")

        packing = check_packing(target)
        if packing:
            for p in packing:
                print(f"[!] {p}")

        sections = analyze_sections(target)
        high_ent = [s for s in sections if s.get("high_entropy")]
        if high_ent:
            print("\n[!] High entropy sections (possible packing/encryption):")
            for s in high_ent:
                print(f"    {s['name']}: entropy={s['entropy']}, size={s['size']}")

        strings_data = extract_strings(target)
        print(f"\n--- Strings Analysis ({strings_data.get('total', 0)} total) ---")
        for category in ["urls", "ips", "domains", "crypto_mining", "persistence", "ssh_related"]:
            items = strings_data.get(category, [])
            if items:
                print(f"  {category}: {len(items)}")
                for item in items[:5]:
                    print(f"    - {item}")

        classification = detect_malware_type(strings_data)
        print(f"\n[*] Classification: {', '.join(classification)}")
    else:
        # Restore the argument placeholder lost from the usage message.
        print("\n[DEMO] Usage: python agent.py <elf_binary>")
        print("[*] Provide a Linux ELF binary for analysis.")
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to the Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by the Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding any notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. Please do not remove or change + the license header comment from a contributed file except when + necessary. 
+ + Copyright 2026 mukul975 + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/personas/_shared/skills/analyzing-linux-kernel-rootkits/SKILL.md b/personas/_shared/skills/analyzing-linux-kernel-rootkits/SKILL.md new file mode 100644 index 0000000..6b89330 --- /dev/null +++ b/personas/_shared/skills/analyzing-linux-kernel-rootkits/SKILL.md @@ -0,0 +1,135 @@ +--- +name: analyzing-linux-kernel-rootkits +description: Detect kernel-level rootkits in Linux memory dumps using Volatility3 linux plugins (check_syscall, lsmod, hidden_modules), + rkhunter system scanning, and /proc vs /sys discrepancy analysis to identify hooked syscalls, hidden kernel modules, and + tampered system structures. +domain: cybersecurity +subdomain: digital-forensics +tags: +- rootkit +- linux +- kernel +- volatility3 +- memory-forensics +- malware-analysis +- rkhunter +- forensics +version: '1.0' +author: mahipal +license: Apache-2.0 +nist_csf: +- RS.AN-01 +- RS.AN-03 +- DE.AE-02 +- RS.MA-01 +--- + +# Analyzing Linux Kernel Rootkits + +## Overview + +Linux kernel rootkits operate at ring 0, modifying kernel data structures to hide processes, files, network connections, and kernel modules from userspace tools. Detection requires either memory forensics (analyzing physical memory dumps with Volatility3) or cross-view analysis (comparing /proc, /sys, and kernel data structures for inconsistencies). 
This skill covers using Volatility3 Linux plugins to detect syscall table hooks, hidden kernel modules, and modified function pointers, supplemented by live system scanning with rkhunter and chkrootkit. + + +## When to Use + +- When investigating security incidents that require analyzing linux kernel rootkits +- When building detection rules or threat hunting queries for this domain +- When SOC analysts need structured procedures for this analysis type +- When validating security monitoring coverage for related attack techniques + +## Prerequisites + +- Volatility3 installed (pip install volatility3) +- Linux memory dump (acquired via LiME, AVML, or /proc/kcore) +- Volatility3 Linux symbol table (ISF) matching the target kernel version +- rkhunter and chkrootkit for live system scanning +- Reference known-good kernel image for comparison + +## Steps + +### Step 1: Acquire Memory Dump +Capture Linux physical memory using LiME kernel module or AVML for cloud instances. + +### Step 2: Analyze with Volatility3 +Run linux.check_syscall, linux.lsmod, linux.hidden_modules, and linux.check_idt plugins to detect rootkit artifacts. + +### Step 3: Cross-View Analysis +Compare module lists from /proc/modules, lsmod, and /sys/module to identify modules hidden from one view but present in another. + +### Step 4: Live System Scanning +Run rkhunter and chkrootkit to detect known rootkit signatures, suspicious files, and modified system binaries. + +## Expected Output + +JSON report containing detected syscall hooks, hidden kernel modules, modified IDT entries, suspicious /proc discrepancies, and rkhunter findings. 
+ +## Example Output + +```text +$ sudo python3 rootkit_analyzer.py --memory /evidence/linux-mem.lime --profile Ubuntu2204 + +Linux Kernel Rootkit Analysis Report +===================================== +Memory Image: /evidence/linux-mem.lime +Kernel Version: 5.15.0-91-generic (Ubuntu 22.04 LTS) +Analysis Time: 2024-01-18 09:15:32 UTC + +[+] Scanning syscall table for hooks... + Syscall Table Base: 0xffffffff82200300 + Total syscalls checked: 449 + + HOOKED SYSCALLS DETECTED: + ┌─────────┬──────────────────┬──────────────────────┬──────────────────────┐ + │ NR │ Syscall │ Expected Address │ Current Address │ + ├─────────┼──────────────────┼──────────────────────┼──────────────────────┤ + │ 0 │ sys_read │ 0xffffffff8139a0e0 │ 0xffffffffc0a12000 │ + │ 2 │ sys_open │ 0xffffffff8139b340 │ 0xffffffffc0a12180 │ + │ 78 │ sys_getdents64 │ 0xffffffff813f5210 │ 0xffffffffc0a12300 │ + │ 62 │ sys_kill │ 0xffffffff8110c4a0 │ 0xffffffffc0a12480 │ + └─────────┴──────────────────┴──────────────────────┴──────────────────────┘ + WARNING: 4 syscall hooks detected - rootkit behavior confirmed + +[+] Checking for hidden kernel modules... + Loaded modules (lsmod): 147 + Modules in kobject list: 149 + HIDDEN MODULES: + - "netfilter_helper" at 0xffffffffc0a10000 (size: 12288) + - "kworker_sched" at 0xffffffffc0a14000 (size: 8192) + +[+] Scanning /proc for discrepancies... + Processes in task_struct list: 234 + Processes visible in /proc: 231 + HIDDEN PROCESSES: + - PID 31337 cmd: "[kworker/0:3]" (disguised as kernel thread) + - PID 31442 cmd: "rsyslogd" (fake, real rsyslogd is PID 892) + - PID 31500 cmd: "" (unnamed process) + +[+] Checking IDT entries... + IDT entries scanned: 256 + Modified entries: 0 (clean) + +[+] Running rkhunter scan... 
+ Checking for known rootkits: 68 variants checked + Diamorphine rootkit: WARNING - signatures match + System binary checks: + /usr/bin/ps: MODIFIED (SHA-256 mismatch) + /usr/bin/netstat: MODIFIED (SHA-256 mismatch) + /usr/bin/ls: MODIFIED (SHA-256 mismatch) + /usr/sbin/ss: OK + +[+] Network analysis... + Hidden connections (not in /proc/net/tcp): + ESTABLISHED 0.0.0.0:0 -> 198.51.100.47:4443 (PID 31337) + ESTABLISHED 0.0.0.0:0 -> 198.51.100.47:8080 (PID 31442) + +Summary: + Rootkit Type: Loadable Kernel Module (LKM) + Probable Family: Diamorphine variant + Syscall Hooks: 4 (read, open, getdents64, kill) + Hidden Modules: 2 + Hidden Processes: 3 + Hidden Connections: 2 (C2: 198.51.100.47) + Modified Binaries: 3 (/usr/bin/ps, netstat, ls) + Risk Level: CRITICAL +``` diff --git a/personas/_shared/skills/analyzing-linux-kernel-rootkits/references/api-reference.md b/personas/_shared/skills/analyzing-linux-kernel-rootkits/references/api-reference.md new file mode 100644 index 0000000..7423fa0 --- /dev/null +++ b/personas/_shared/skills/analyzing-linux-kernel-rootkits/references/api-reference.md @@ -0,0 +1,92 @@ +# API Reference: Analyzing Linux Kernel Rootkits + +## Volatility3 Linux Plugins + +```bash +# Check syscall table for hooks +vol -f memory.lime linux.check_syscall.Check_syscall + +# List loaded kernel modules +vol -f memory.lime linux.lsmod.Lsmod + +# Detect hidden kernel modules +vol -f memory.lime linux.hidden_modules.Hidden_modules + +# Check IDT for hooks +vol -f memory.lime linux.check_idt.Check_idt + +# List processes (detect hidden) +vol -f memory.lime linux.pslist.PsList +vol -f memory.lime linux.pstree.PsTree + +# Check for modified cred structures +vol -f memory.lime linux.check_creds.Check_creds + +# Network connections +vol -f memory.lime linux.sockstat.Sockstat + +# JSON output +vol -f memory.lime linux.check_syscall.Check_syscall -r json > syscalls.json +``` + +## Memory Acquisition Tools + +| Tool | Command | Use Case | 
+|------|---------|----------| +| LiME | `insmod lime.ko "path=/tmp/mem.lime format=lime"` | Linux kernel module | +| AVML | `avml /tmp/memory.raw` | Azure/cloud instances | +| /proc/kcore | `dd if=/proc/kcore of=mem.raw` | Quick (partial) dump | + +## Volatility3 Symbol Tables (ISF) + +```bash +# Generate ISF from running kernel +vol -f memory.lime banners.Banners +# Download matching ISF from: +# https://github.com/volatilityfoundation/volatility3#symbol-tables +``` + +## rkhunter Commands + +```bash +# Full system scan +rkhunter --check --skip-keypress --report-warnings-only + +# Update signatures +rkhunter --update + +# Check specific tests +rkhunter --check --enable rootkits,trojans,os_specific + +# Output to log file +rkhunter --check --logfile /var/log/rkhunter.log +``` + +## Known Linux Rootkits Detected + +| Rootkit | Technique | Volatility Plugin | +|---------|-----------|-------------------| +| Diamorphine | Hidden module + syscall hook | check_syscall, hidden_modules | +| Reptile | Syscall hook + port knocking | check_syscall | +| KBeast | Syscall hook + /proc hiding | check_syscall, hidden_modules | +| Adore-ng | VFS hook + hidden files | lsmod, check_syscall | +| Jynx2 | LD_PRELOAD userspace | pslist (parent check) | + +## Cross-View Detection + +```bash +# Compare /proc/modules vs /sys/module +diff <(cat /proc/modules | awk '{print $1}' | sort) \ + <(ls /sys/module/ | sort) + +# Check for hidden processes +diff <(ls /proc/ | grep -E '^[0-9]+$' | sort -n) \ + <(ps -eo pid --no-headers | sort -n) +``` + +### References + +- Volatility3 Linux Plugins: https://volatility3.readthedocs.io/en/latest/volatility3.plugins.linux.html +- LiME: https://github.com/504ensicsLabs/LiME +- rkhunter: http://rkhunter.sourceforge.net/ +- MITRE T1014 Rootkit: https://attack.mitre.org/techniques/T1014/ diff --git a/personas/_shared/skills/analyzing-linux-kernel-rootkits/scripts/agent.py b/personas/_shared/skills/analyzing-linux-kernel-rootkits/scripts/agent.py new file 
#!/usr/bin/env python3
"""Linux Kernel Rootkit Detection Agent - analyzes memory dumps with Volatility3 and live system with rkhunter."""

import json
import argparse
import logging
import subprocess
import os
from datetime import datetime

logging.basicConfig(level=logging.INFO, format="%(asctime)s [%(levelname)s] %(message)s")
logger = logging.getLogger(__name__)


def run_vol3_plugin(memory_dump, plugin, isf_url=None):
    """Run a Volatility3 Linux plugin and return its parsed JSON rows.

    Returns [] when the `vol` binary is missing, the run times out, or the
    output is empty/invalid JSON, so callers can treat "no data" and
    "tool failure" uniformly instead of crashing the whole report.
    """
    cmd = ["vol", "-f", memory_dump, plugin, "-r", "json"]
    if isf_url:
        cmd.extend(["--isf", isf_url])
    try:
        result = subprocess.run(cmd, capture_output=True, text=True, timeout=600)
    except (FileNotFoundError, subprocess.TimeoutExpired) as exc:
        logger.error("Volatility3 run failed for %s: %s", plugin, exc)
        return []
    try:
        return json.loads(result.stdout) if result.stdout else []
    except json.JSONDecodeError:
        logger.error("Volatility3 %s output parse failed", plugin)
        return []


def check_syscall_hooks(memory_dump, isf_url=None):
    """Detect hooked system calls using linux.check_syscall.

    A syscall whose handler resolves to any module other than "kernel"
    is reported as a critical syscall_hook finding.
    """
    results = run_vol3_plugin(memory_dump, "linux.check_syscall.Check_syscall", isf_url)
    hooked = []
    for entry in results:
        # Volatility3 JSON may nest rows under "__children"; accept both shapes.
        row = entry.get("__children", [entry]) if isinstance(entry, dict) else [entry]
        for item in row:
            symbol = item.get("Symbol", item.get("symbol", ""))
            module = item.get("Module", item.get("module", ""))
            if module and module != "kernel":
                hooked.append({
                    "syscall_number": item.get("Index", item.get("index", "")),
                    "expected_handler": symbol,
                    "actual_module": module,
                    "severity": "critical",
                    "indicator": "syscall_hook",
                })
    return hooked


def detect_hidden_modules(memory_dump, isf_url=None):
    """Detect hidden kernel modules via cross-view analysis.

    Compares linux.lsmod output against linux.hidden_modules; anything
    reported by the latter is a critical hidden_kernel_module finding,
    annotated with whether lsmod also saw it.
    """
    lsmod_results = run_vol3_plugin(memory_dump, "linux.lsmod.Lsmod", isf_url)
    hidden_results = run_vol3_plugin(memory_dump, "linux.hidden_modules.Hidden_modules", isf_url)
    lsmod_names = set()
    for entry in lsmod_results:
        name = entry.get("Name", entry.get("name", ""))
        if name:
            lsmod_names.add(name)
    hidden = []
    for entry in hidden_results:
        name = entry.get("Name", entry.get("name", ""))
        if name:
            hidden.append({
                "module_name": name,
                "in_lsmod": name in lsmod_names,
                "severity": "critical",
                "indicator": "hidden_kernel_module",
                "detail": f"Module '{name}' hidden from standard listing",
            })
    return hidden


def check_idt_hooks(memory_dump, isf_url=None):
    """Check the Interrupt Descriptor Table for hooked handlers.

    Any IDT entry serviced by a module other than "kernel" is reported
    as a critical idt_hook finding.
    """
    results = run_vol3_plugin(memory_dump, "linux.check_idt.Check_idt", isf_url)
    hooked = []
    for entry in results:
        module = entry.get("Module", entry.get("module", ""))
        if module and module != "kernel":
            hooked.append({
                "interrupt": entry.get("Index", ""),
                "handler_module": module,
                "severity": "critical",
                "indicator": "idt_hook",
            })
    return hooked


def run_rkhunter():
    """Run the rkhunter scanner on the live system and collect warnings.

    Returns [] (with a logged error) when rkhunter is not installed or
    the scan exceeds the 5-minute timeout, rather than aborting analysis.
    """
    cmd = ["rkhunter", "--check", "--skip-keypress", "--report-warnings-only", "--nocolors"]
    try:
        result = subprocess.run(cmd, capture_output=True, text=True, timeout=300)
    except (FileNotFoundError, subprocess.TimeoutExpired) as exc:
        logger.error("rkhunter unavailable or timed out: %s", exc)
        return []
    findings = []
    for line in result.stdout.split("\n"):
        line = line.strip()
        if "Warning:" in line or "[ Warning ]" in line:
            findings.append({
                "tool": "rkhunter",
                "finding": line.replace("Warning:", "").strip(),
                "severity": "high",
            })
    return findings


def check_proc_sys_discrepancy():
    """Compare /proc/modules with /sys/module to spot hidden modules.

    An LKM rootkit that unlinks itself from the module list disappears from
    /proc/modules but may still have a /sys/module entry. Modules without an
    `initstate` file are skipped — presumably built-ins, which legitimately
    appear only in /sys/module (TODO confirm against target kernels).
    Returns [] silently when /proc//sys are unreadable (non-Linux or no perms).
    """
    findings = []
    proc_modules = set()
    sys_modules = set()
    try:
        with open("/proc/modules") as f:
            for line in f:
                proc_modules.add(line.split()[0])
    except (FileNotFoundError, PermissionError):
        return findings
    try:
        sys_modules = set(os.listdir("/sys/module"))
    except (FileNotFoundError, PermissionError):
        return findings
    only_in_sys = sys_modules - proc_modules
    for mod in only_in_sys:
        if not os.path.exists(f"/sys/module/{mod}/initstate"):
            continue
        findings.append({
            "module": mod, "indicator": "proc_sys_discrepancy",
            "severity": "high",
            "detail": f"Module '{mod}' in /sys/module but missing from /proc/modules",
        })
    return findings
os.path.exists(f"/sys/module/{mod}/initstate"): + continue + findings.append({ + "module": mod, "indicator": "proc_sys_discrepancy", + "severity": "high", + "detail": f"Module '{mod}' in /sys/module but missing from /proc/modules", + }) + return findings + + +def generate_report(syscall_hooks, hidden_mods, idt_hooks, rkhunter_findings, proc_findings, source): + all_findings = syscall_hooks + hidden_mods + idt_hooks + rkhunter_findings + proc_findings + critical = sum(1 for f in all_findings if f.get("severity") == "critical") + return { + "timestamp": datetime.utcnow().isoformat(), + "analysis_source": source, + "syscall_hooks": syscall_hooks, + "hidden_modules": hidden_mods, + "idt_hooks": idt_hooks, + "rkhunter_warnings": rkhunter_findings, + "proc_sys_discrepancies": proc_findings, + "total_findings": len(all_findings), + "critical_findings": critical, + "rootkit_detected": critical > 0, + } + + +def main(): + parser = argparse.ArgumentParser(description="Linux Kernel Rootkit Detection Agent") + parser.add_argument("--memory-dump", help="Path to Linux memory dump for Volatility3 analysis") + parser.add_argument("--isf-url", help="Volatility3 ISF symbol table URL") + parser.add_argument("--live-scan", action="store_true", help="Run rkhunter + /proc analysis on live system") + parser.add_argument("--output", default="rootkit_detection_report.json") + args = parser.parse_args() + + syscall_hooks, hidden_mods, idt_hooks = [], [], [] + rkhunter_findings, proc_findings = [], [] + source = "none" + if args.memory_dump: + source = f"memory_dump:{args.memory_dump}" + syscall_hooks = check_syscall_hooks(args.memory_dump, args.isf_url) + hidden_mods = detect_hidden_modules(args.memory_dump, args.isf_url) + idt_hooks = check_idt_hooks(args.memory_dump, args.isf_url) + if args.live_scan: + source = "live_system" if source == "none" else source + "+live_system" + rkhunter_findings = run_rkhunter() + proc_findings = check_proc_sys_discrepancy() + report = 
generate_report(syscall_hooks, hidden_mods, idt_hooks, rkhunter_findings, proc_findings, source) + with open(args.output, "w") as f: + json.dump(report, f, indent=2, default=str) + logger.info("Rootkit scan: %d findings (%d critical), rootkit detected: %s", + report["total_findings"], report["critical_findings"], report["rootkit_detected"]) + print(json.dumps(report, indent=2, default=str)) + + +if __name__ == "__main__": + main() diff --git a/personas/_shared/skills/analyzing-linux-system-artifacts/LICENSE b/personas/_shared/skills/analyzing-linux-system-artifacts/LICENSE new file mode 100644 index 0000000..d885118 --- /dev/null +++ b/personas/_shared/skills/analyzing-linux-system-artifacts/LICENSE @@ -0,0 +1,201 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to the Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by the Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding any notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. Please do not remove or change + the license header comment from a contributed file except when + necessary. 
+ + Copyright 2026 mukul975 + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/personas/_shared/skills/analyzing-linux-system-artifacts/SKILL.md b/personas/_shared/skills/analyzing-linux-system-artifacts/SKILL.md new file mode 100644 index 0000000..1bff8d2 --- /dev/null +++ b/personas/_shared/skills/analyzing-linux-system-artifacts/SKILL.md @@ -0,0 +1,332 @@ +--- +name: analyzing-linux-system-artifacts +description: Examine Linux system artifacts including auth logs, cron jobs, shell history, and system configuration to uncover + evidence of compromise or unauthorized activity. 
+domain: cybersecurity +subdomain: digital-forensics +tags: +- forensics +- linux-forensics +- system-artifacts +- log-analysis +- persistence-detection +- incident-investigation +version: '1.0' +author: mahipal +license: Apache-2.0 +nist_csf: +- RS.AN-01 +- RS.AN-03 +- DE.AE-02 +- RS.MA-01 +--- + +# Analyzing Linux System Artifacts + +## When to Use +- When investigating a compromised Linux server or workstation +- For identifying persistence mechanisms (cron, systemd, SSH keys) +- When tracing user activity through shell history and authentication logs +- During incident response to determine the scope of a Linux-based breach +- For detecting rootkits, backdoors, and unauthorized modifications + +## Prerequisites +- Forensic image or live access to the Linux system (read-only) +- Understanding of Linux file system hierarchy (FHS) +- Knowledge of common Linux logging locations (/var/log/) +- Tools: chkrootkit, rkhunter, AIDE, auditd logs +- Familiarity with systemd, cron, and PAM configurations +- Root access for complete artifact collection + +## Workflow + +### Step 1: Mount and Collect System Artifacts + +```bash +# Mount forensic image read-only +mount -o ro,loop,offset=$((2048*512)) /cases/case-2024-001/images/linux_evidence.dd /mnt/evidence + +# Create collection directories +mkdir -p /cases/case-2024-001/linux/{logs,config,users,persistence,network} + +# Collect authentication logs +cp /mnt/evidence/var/log/auth.log* /cases/case-2024-001/linux/logs/ +cp /mnt/evidence/var/log/secure* /cases/case-2024-001/linux/logs/ +cp /mnt/evidence/var/log/syslog* /cases/case-2024-001/linux/logs/ +cp /mnt/evidence/var/log/kern.log* /cases/case-2024-001/linux/logs/ +cp /mnt/evidence/var/log/audit/audit.log* /cases/case-2024-001/linux/logs/ +cp /mnt/evidence/var/log/wtmp /cases/case-2024-001/linux/logs/ +cp /mnt/evidence/var/log/btmp /cases/case-2024-001/linux/logs/ +cp /mnt/evidence/var/log/lastlog /cases/case-2024-001/linux/logs/ +cp /mnt/evidence/var/log/faillog 
/cases/case-2024-001/linux/logs/ + +# Collect user artifacts +for user_dir in /mnt/evidence/home/*/; do + username=$(basename "$user_dir") + mkdir -p /cases/case-2024-001/linux/users/$username + cp "$user_dir"/.bash_history /cases/case-2024-001/linux/users/$username/ 2>/dev/null + cp "$user_dir"/.zsh_history /cases/case-2024-001/linux/users/$username/ 2>/dev/null + cp -r "$user_dir"/.ssh/ /cases/case-2024-001/linux/users/$username/ 2>/dev/null + cp "$user_dir"/.bashrc /cases/case-2024-001/linux/users/$username/ 2>/dev/null + cp "$user_dir"/.profile /cases/case-2024-001/linux/users/$username/ 2>/dev/null + cp "$user_dir"/.viminfo /cases/case-2024-001/linux/users/$username/ 2>/dev/null + cp "$user_dir"/.wget-hsts /cases/case-2024-001/linux/users/$username/ 2>/dev/null + cp "$user_dir"/.python_history /cases/case-2024-001/linux/users/$username/ 2>/dev/null +done + +# Collect root user artifacts +cp /mnt/evidence/root/.bash_history /cases/case-2024-001/linux/users/root/ 2>/dev/null +cp -r /mnt/evidence/root/.ssh/ /cases/case-2024-001/linux/users/root/ 2>/dev/null + +# Collect system configuration +cp /mnt/evidence/etc/passwd /cases/case-2024-001/linux/config/ +cp /mnt/evidence/etc/shadow /cases/case-2024-001/linux/config/ +cp /mnt/evidence/etc/group /cases/case-2024-001/linux/config/ +cp /mnt/evidence/etc/sudoers /cases/case-2024-001/linux/config/ +cp -r /mnt/evidence/etc/sudoers.d/ /cases/case-2024-001/linux/config/ +cp /mnt/evidence/etc/hosts /cases/case-2024-001/linux/config/ +cp /mnt/evidence/etc/resolv.conf /cases/case-2024-001/linux/config/ +cp -r /mnt/evidence/etc/ssh/ /cases/case-2024-001/linux/config/ +``` + +### Step 2: Analyze User Accounts and Authentication + +```bash +# Analyze user accounts for anomalies +python3 << 'PYEOF' +print("=== USER ACCOUNT ANALYSIS ===\n") + +# Parse /etc/passwd +with open('/cases/case-2024-001/linux/config/passwd') as f: + for line in f: + parts = line.strip().split(':') + if len(parts) >= 7: + username, _, uid, gid, comment, 
home, shell = parts[0], parts[1], int(parts[2]), int(parts[3]), parts[4], parts[5], parts[6] + + # Flag accounts with UID 0 (root equivalent) + if uid == 0 and username != 'root': + print(f" ALERT: UID 0 account: {username} (shell: {shell})") + + # Flag accounts with login shells that shouldn't have them + if shell not in ('/bin/false', '/usr/sbin/nologin', '/bin/sync') and uid >= 1000: + print(f" User: {username} (UID:{uid}, Shell:{shell}, Home:{home})") + + # Flag system accounts with login shells + if uid < 1000 and uid > 0 and shell in ('/bin/bash', '/bin/sh', '/bin/zsh'): + print(f" WARNING: System account with shell: {username} (UID:{uid}, Shell:{shell})") + +# Parse /etc/shadow for account status +print("\n=== PASSWORD STATUS ===") +with open('/cases/case-2024-001/linux/config/shadow') as f: + for line in f: + parts = line.strip().split(':') + if len(parts) >= 3: + username = parts[0] + pwd_hash = parts[1] + last_change = parts[2] + + if pwd_hash and pwd_hash not in ('*', '!', '!!', ''): + hash_type = 'Unknown' + if pwd_hash.startswith('$6$'): hash_type = 'SHA-512' + elif pwd_hash.startswith('$5$'): hash_type = 'SHA-256' + elif pwd_hash.startswith('$y$'): hash_type = 'yescrypt' + elif pwd_hash.startswith('$1$'): hash_type = 'MD5 (WEAK)' + print(f" {username}: {hash_type} hash, last changed: day {last_change}") +PYEOF + +# Analyze login history +last -f /cases/case-2024-001/linux/logs/wtmp > /cases/case-2024-001/linux/analysis/login_history.txt +lastb -f /cases/case-2024-001/linux/logs/btmp > /cases/case-2024-001/linux/analysis/failed_logins.txt 2>/dev/null +``` + +### Step 3: Examine Persistence Mechanisms + +```bash +# Check cron jobs for all users +echo "=== CRON JOBS ===" > /cases/case-2024-001/linux/persistence/cron_analysis.txt + +# System cron +for cronfile in /mnt/evidence/etc/crontab /mnt/evidence/etc/cron.d/*; do + echo "--- $cronfile ---" >> /cases/case-2024-001/linux/persistence/cron_analysis.txt + cat "$cronfile" 2>/dev/null >> 
/cases/case-2024-001/linux/persistence/cron_analysis.txt + echo "" >> /cases/case-2024-001/linux/persistence/cron_analysis.txt +done + +# User cron tabs +for cronfile in /mnt/evidence/var/spool/cron/crontabs/*; do + echo "--- User crontab: $(basename $cronfile) ---" >> /cases/case-2024-001/linux/persistence/cron_analysis.txt + cat "$cronfile" 2>/dev/null >> /cases/case-2024-001/linux/persistence/cron_analysis.txt + echo "" >> /cases/case-2024-001/linux/persistence/cron_analysis.txt +done + +# Check systemd services for persistence +echo "=== SYSTEMD SERVICES ===" > /cases/case-2024-001/linux/persistence/systemd_analysis.txt +find /mnt/evidence/etc/systemd/system/ -name "*.service" -newer /mnt/evidence/etc/os-release \ + >> /cases/case-2024-001/linux/persistence/systemd_analysis.txt + +for svc in /mnt/evidence/etc/systemd/system/*.service; do + echo "--- $(basename $svc) ---" >> /cases/case-2024-001/linux/persistence/systemd_analysis.txt + cat "$svc" >> /cases/case-2024-001/linux/persistence/systemd_analysis.txt + echo "" >> /cases/case-2024-001/linux/persistence/systemd_analysis.txt +done + +# Check authorized SSH keys (backdoor detection) +echo "=== SSH AUTHORIZED KEYS ===" > /cases/case-2024-001/linux/persistence/ssh_keys.txt +find /mnt/evidence/home/ /mnt/evidence/root/ -name "authorized_keys" -exec sh -c \ + 'echo "--- {} ---"; cat {}; echo ""' \; >> /cases/case-2024-001/linux/persistence/ssh_keys.txt + +# Check rc.local and init scripts +cat /mnt/evidence/etc/rc.local 2>/dev/null > /cases/case-2024-001/linux/persistence/rc_local.txt + +# Check /etc/profile.d/ for login-triggered scripts +ls -la /mnt/evidence/etc/profile.d/ > /cases/case-2024-001/linux/persistence/profile_scripts.txt + +# Check for LD_PRELOAD hijacking +grep -r "LD_PRELOAD" /mnt/evidence/etc/ 2>/dev/null > /cases/case-2024-001/linux/persistence/ld_preload.txt +cat /mnt/evidence/etc/ld.so.preload 2>/dev/null >> /cases/case-2024-001/linux/persistence/ld_preload.txt +``` + +### Step 4: Analyze 
Shell History and Command Execution + +```bash +# Analyze bash history for each user +python3 << 'PYEOF' +import os, glob + +print("=== SHELL HISTORY ANALYSIS ===\n") + +suspicious_commands = [ + 'wget', 'curl', 'nc ', 'ncat', 'netcat', 'python -c', 'python3 -c', + 'perl -e', 'base64', 'chmod 777', 'chmod +s', '/dev/tcp', '/dev/udp', + 'nmap', 'masscan', 'hydra', 'john', 'hashcat', 'passwd', 'useradd', + 'iptables -F', 'ufw disable', 'history -c', 'rm -rf /', 'dd if=', + 'crontab', 'at ', 'systemctl enable', 'ssh-keygen', 'scp ', 'rsync', + 'tar czf', 'zip -r', 'openssl enc', 'gpg --encrypt', 'shred', + 'chattr', 'setfacl', 'awk', '/tmp/', '/dev/shm/' +] + +for hist_file in glob.glob('/cases/case-2024-001/linux/users/*/.bash_history'): + username = hist_file.split('/')[-2] + print(f"User: {username}") + + with open(hist_file, 'r', errors='ignore') as f: + lines = f.readlines() + + print(f" Total commands: {len(lines)}") + flagged = [] + for i, line in enumerate(lines): + line = line.strip() + for cmd in suspicious_commands: + if cmd in line.lower(): + flagged.append((i+1, line)) + break + + if flagged: + print(f" Suspicious commands: {len(flagged)}") + for lineno, cmd in flagged: + print(f" Line {lineno}: {cmd[:120]}") + print() +PYEOF +``` + +### Step 5: Check for Rootkits and Modified Binaries + +```bash +# Check for known rootkit indicators +# Compare system binary hashes against known-good +find /mnt/evidence/usr/bin/ /mnt/evidence/usr/sbin/ /mnt/evidence/bin/ /mnt/evidence/sbin/ \ + -type f -executable -exec sha256sum {} \; > /cases/case-2024-001/linux/analysis/binary_hashes.txt + +# Check for SUID/SGID binaries (potential privilege escalation) +find /mnt/evidence/ -perm -4000 -type f 2>/dev/null > /cases/case-2024-001/linux/analysis/suid_files.txt +find /mnt/evidence/ -perm -2000 -type f 2>/dev/null > /cases/case-2024-001/linux/analysis/sgid_files.txt + +# Check for suspicious files in /tmp and /dev/shm +find /mnt/evidence/tmp/ /mnt/evidence/dev/shm/ -type f 
2>/dev/null \ + -exec file {} \; > /cases/case-2024-001/linux/analysis/tmp_files.txt + +# Check for hidden files and directories +find /mnt/evidence/ -name ".*" -not -path "*/\." -type f 2>/dev/null | \ + head -100 > /cases/case-2024-001/linux/analysis/hidden_files.txt + +# Check kernel modules +ls -la /mnt/evidence/lib/modules/$(ls /mnt/evidence/lib/modules/ | head -1)/extra/ 2>/dev/null \ + > /cases/case-2024-001/linux/analysis/extra_modules.txt + +# Check for modified PAM configuration (authentication backdoors) +diff /mnt/evidence/etc/pam.d/ /cases/baseline/pam.d/ 2>/dev/null \ + > /cases/case-2024-001/linux/analysis/pam_changes.txt +``` + +## Key Concepts + +| Concept | Description | +|---------|-------------| +| /var/log/auth.log | Primary authentication log on Debian/Ubuntu systems | +| /var/log/secure | Primary authentication log on RHEL/CentOS systems | +| wtmp/btmp | Binary logs recording successful and failed login sessions | +| .bash_history | User command history file (can be cleared by attackers) | +| crontab | Scheduled task system commonly used for persistence | +| authorized_keys | SSH public keys granting passwordless access to an account | +| SUID bit | File permission allowing execution as the file owner (privilege escalation vector) | +| LD_PRELOAD | Environment variable that loads a shared library before all others (hooking technique) | + +## Tools & Systems + +| Tool | Purpose | +|------|---------| +| chkrootkit | Rootkit detection scanner for Linux systems | +| rkhunter | Rootkit Hunter - checks for rootkits, backdoors, and local exploits | +| AIDE | Advanced Intrusion Detection Environment - file integrity monitor | +| auditd | Linux audit framework for system call and file access monitoring | +| last/lastb | Parse wtmp/btmp for login and failed login history | +| Plaso/log2timeline | Super-timeline creation including Linux artifacts | +| osquery | SQL-based system querying for live forensic investigation | +| Velociraptor | Endpoint agent 
with Linux artifact collection capabilities | + +## Common Scenarios + +**Scenario 1: SSH Brute Force Followed by Compromise** +Analyze auth.log for failed SSH attempts followed by success, identify the attacking IP, check .bash_history for post-compromise commands, examine authorized_keys for added backdoor keys, check crontab for persistence, review network connections. + +**Scenario 2: Web Server Compromise via Application Vulnerability** +Examine web server access and error logs for exploitation attempts, check /tmp and /dev/shm for webshells, analyze the web server user's activity (www-data), check for privilege escalation via SUID binaries or kernel exploits, review outbound connections. + +**Scenario 3: Insider Threat on Database Server** +Analyze the suspect user's bash_history for database dump commands, check for large tar/zip files in home directory or /tmp, examine scp/rsync commands for data transfer, review cron jobs for automated exfiltration, check USB device logs. + +**Scenario 4: Crypto-Miner on Cloud Instance** +Check for high-CPU processes in /proc (live) or systemd service files, examine crontab entries for miner restart scripts, check /tmp for mining binaries, analyze network connections for mining pool communications, review authorized_keys for attacker access. 
+ +## Output Format + +``` +Linux Forensics Summary: + System: webserver01 (Ubuntu 22.04 LTS) + Hostname: webserver01.corp.local + Kernel: 5.15.0-91-generic + + User Accounts: + Total: 25 (3 with UID 0 - 1 ANOMALOUS) + Interactive shells: 8 users + Recently created: admin2 (created 2024-01-15) + + Authentication Events: + Successful SSH logins: 456 + Failed SSH attempts: 12,345 (from 23 unique IPs) + Sudo executions: 89 + + Persistence Mechanisms Found: + Cron jobs: 3 suspicious (reverse shell, miner restart) + Systemd services: 1 unknown (update-checker.service) + SSH keys: 2 unauthorized keys in root authorized_keys + rc.local: Modified with download cradle + + Suspicious Activity: + - bash_history contains wget to pastebin URL + - SUID binary /tmp/.hidden/escalate found + - /dev/shm/ contains compiled ELF binary + - LD_PRELOAD in /etc/ld.so.preload pointing to /lib/.hidden.so + + Report: /cases/case-2024-001/linux/analysis/ +``` diff --git a/personas/_shared/skills/analyzing-linux-system-artifacts/references/api-reference.md b/personas/_shared/skills/analyzing-linux-system-artifacts/references/api-reference.md new file mode 100644 index 0000000..c1de815 --- /dev/null +++ b/personas/_shared/skills/analyzing-linux-system-artifacts/references/api-reference.md @@ -0,0 +1,114 @@ +# API Reference: Linux Forensic Artifact Analysis Tools + +## Key Artifact Locations + +| Artifact | Path | Description | +|----------|------|-------------| +| Auth logs | `/var/log/auth.log` (Debian) `/var/log/secure` (RHEL) | Authentication events | +| Login history | `/var/log/wtmp` | Successful logins (binary, use `last`) | +| Failed logins | `/var/log/btmp` | Failed logins (binary, use `lastb`) | +| Bash history | `~/.bash_history` | Command history per user | +| SSH keys | `~/.ssh/authorized_keys` | Authorized public keys | +| Crontab | `/etc/crontab`, `/var/spool/cron/crontabs/` | Scheduled tasks | +| Systemd services | `/etc/systemd/system/` | Service definitions | +| LD_PRELOAD | 
`/etc/ld.so.preload` | Shared library preloading | +| SUID binaries | `find / -perm -4000` | Setuid executables | + +## last / lastb - Login History + +### Syntax +```bash +last -f /var/log/wtmp # Successful logins +lastb -f /var/log/btmp # Failed logins +last -i -f /var/log/wtmp # Show IP addresses +last -s 2024-01-15 -t 2024-01-20 # Date range filter +``` + +### Output Format +``` +user pts/0 192.168.1.50 Mon Jan 15 09:00 still logged in +``` + +## chkrootkit - Rootkit Scanner + +### Syntax +```bash +chkrootkit # Full scan +chkrootkit -r /mnt/evidence # Scan mounted evidence +chkrootkit -q # Quiet (infected only) +``` + +## rkhunter - Rootkit Hunter + +### Syntax +```bash +rkhunter --check # Full system check +rkhunter --check --rootdir /mnt/ev # Check evidence root +rkhunter --list tests # List available tests +rkhunter --propupd # Update file properties DB +``` + +### Check Categories +| Check | Description | +|-------|-------------| +| `rootkits` | Known rootkit signatures | +| `trojans` | Trojanized system binaries | +| `properties` | File permission anomalies | +| `filesystem` | Hidden files and directories | + +## auditd Log Parsing + +### ausearch Syntax +```bash +ausearch -m execve -ts recent # Recent command execution +ausearch -m USER_AUTH -ts today # Authentication events +ausearch -k suspicious_activity # Custom audit rule key +ausearch -ua 0 -ts today # Root user actions +``` + +### aureport Syntax +```bash +aureport --auth # Authentication summary +aureport --login # Login summary +aureport --file # File access summary +aureport --summary # Overall summary +``` + +## osquery - SQL-based System Queries + +### Syntax +```bash +osqueryi "SELECT * FROM users WHERE uid = 0" +osqueryi "SELECT * FROM crontab" +osqueryi "SELECT * FROM authorized_keys" +osqueryi "SELECT * FROM suid_bin" +osqueryi "SELECT * FROM process_open_sockets" +``` + +### Key Tables +| Table | Content | +|-------|---------| +| `users` | User account information | +| `crontab` | Cron 
job entries | +| `authorized_keys` | SSH authorized keys | +| `suid_bin` | SUID binaries | +| `process_open_sockets` | Network connections by process | +| `shell_history` | Command history entries | + +## Plaso / log2timeline - Super Timeline + +### Syntax +```bash +log2timeline.py /cases/timeline.plaso /mnt/evidence +psort.py -o l2tcsv /cases/timeline.plaso > timeline.csv +psort.py -o l2tcsv /cases/timeline.plaso "date > '2024-01-15'" +``` + +## AIDE - File Integrity + +### Syntax +```bash +aide --init # Initialize database +aide --check # Check for changes +aide --compare # Compare databases +``` diff --git a/personas/_shared/skills/analyzing-linux-system-artifacts/scripts/agent.py b/personas/_shared/skills/analyzing-linux-system-artifacts/scripts/agent.py new file mode 100644 index 0000000..118f5bc --- /dev/null +++ b/personas/_shared/skills/analyzing-linux-system-artifacts/scripts/agent.py @@ -0,0 +1,263 @@ +#!/usr/bin/env python3 +"""Linux system artifact forensics agent for investigating compromised systems.""" + +import os +import sys +import glob +import shlex +import subprocess + + +def run_cmd(cmd): + """Execute a command and return output.""" + if isinstance(cmd, str): + cmd = shlex.split(cmd) + result = subprocess.run(cmd, capture_output=True, text=True, timeout=30) + return result.stdout.strip(), result.stderr.strip(), result.returncode + + +def analyze_passwd(passwd_path): + """Analyze /etc/passwd for suspicious accounts.""" + findings = [] + with open(passwd_path, "r") as f: + for line in f: + parts = line.strip().split(":") + if len(parts) < 7: + continue + username, _, uid, gid = parts[0], parts[1], int(parts[2]), int(parts[3]) + home, shell = parts[5], parts[6] + if uid == 0 and username != "root": + findings.append({ + "severity": "CRITICAL", + "finding": f"UID 0 account: {username} (shell: {shell})", + }) + login_shells = ["/bin/bash", "/bin/sh", "/bin/zsh", "/usr/bin/zsh"] + if uid < 1000 and uid > 0 and shell in login_shells: + 
findings.append({ + "severity": "WARNING", + "finding": f"System account with login shell: {username} (UID:{uid})", + }) + if uid >= 1000 and shell not in ["/bin/false", "/usr/sbin/nologin", "/bin/sync"]: + findings.append({ + "severity": "INFO", + "finding": f"Interactive user: {username} (UID:{uid}, Home:{home})", + }) + return findings + + +def analyze_shadow(shadow_path): + """Analyze /etc/shadow for password hash types and status.""" + findings = [] + with open(shadow_path, "r") as f: + for line in f: + parts = line.strip().split(":") + if len(parts) < 3: + continue + username = parts[0] + pwd_hash = parts[1] + if pwd_hash and pwd_hash not in ("*", "!", "!!", ""): + hash_type = "Unknown" + if pwd_hash.startswith("$6$"): + hash_type = "SHA-512" + elif pwd_hash.startswith("$5$"): + hash_type = "SHA-256" + elif pwd_hash.startswith("$y$"): + hash_type = "yescrypt" + elif pwd_hash.startswith("$1$"): + hash_type = "MD5 (WEAK)" + findings.append({ + "severity": "WARNING", + "finding": f"{username} uses weak MD5 password hash", + }) + findings.append({ + "severity": "INFO", + "finding": f"{username}: {hash_type} hash, last changed day {parts[2]}", + }) + return findings + + +def analyze_bash_history(history_path, username="unknown"): + """Analyze bash history for suspicious commands.""" + suspicious_patterns = [ + "wget", "curl", "nc ", "ncat", "netcat", "python -c", "python3 -c", + "perl -e", "base64", "chmod 777", "chmod +s", "/dev/tcp", "/dev/udp", + "nmap", "masscan", "hydra", "john", "hashcat", "passwd", "useradd", + "iptables -F", "ufw disable", "history -c", "rm -rf", "dd if=", + "crontab", "systemctl enable", "ssh-keygen", "scp ", "rsync", + "/tmp/", "/dev/shm/", "mkfifo", "socat", + ] + findings = [] + with open(history_path, "r", errors="ignore") as f: + lines = f.readlines() + for i, line in enumerate(lines): + line_stripped = line.strip() + for pattern in suspicious_patterns: + if pattern in line_stripped.lower(): + findings.append({ + "user": username, + 
"line_number": i + 1, + "command": line_stripped[:200], + "matched_pattern": pattern, + }) + break + return findings + + +def check_cron_persistence(evidence_root): + """Check cron jobs for persistence mechanisms.""" + findings = [] + cron_paths = [ + os.path.join(evidence_root, "etc/crontab"), + *glob.glob(os.path.join(evidence_root, "etc/cron.d/*")), + *glob.glob(os.path.join(evidence_root, "var/spool/cron/crontabs/*")), + ] + for cron_path in cron_paths: + if os.path.exists(cron_path) and os.path.isfile(cron_path): + with open(cron_path, "r", errors="ignore") as f: + for line in f: + line = line.strip() + if line and not line.startswith("#"): + suspicious = any( + p in line.lower() + for p in ["wget", "curl", "/tmp/", "/dev/shm/", "base64", + "python", "bash -i", "reverse", "nc ", "ncat"] + ) + if suspicious: + findings.append({ + "severity": "HIGH", + "source": cron_path, + "entry": line[:200], + }) + return findings + + +def check_ssh_keys(evidence_root): + """Check for unauthorized SSH authorized_keys.""" + findings = [] + key_files = glob.glob( + os.path.join(evidence_root, "home/*/.ssh/authorized_keys") + ) + glob.glob( + os.path.join(evidence_root, "root/.ssh/authorized_keys") + ) + for key_file in key_files: + if os.path.exists(key_file): + with open(key_file, "r") as f: + keys = [l.strip() for l in f if l.strip() and not l.startswith("#")] + if keys: + findings.append({ + "file": key_file, + "key_count": len(keys), + "keys": [k[:80] + "..." 
for k in keys], + }) + return findings + + +def check_systemd_persistence(evidence_root): + """Check for suspicious systemd service files.""" + findings = [] + service_dirs = [ + os.path.join(evidence_root, "etc/systemd/system"), + os.path.join(evidence_root, "usr/lib/systemd/system"), + ] + for svc_dir in service_dirs: + if not os.path.exists(svc_dir): + continue + for svc_file in glob.glob(os.path.join(svc_dir, "*.service")): + with open(svc_file, "r", errors="ignore") as f: + content = f.read() + suspicious = any( + p in content.lower() + for p in ["/tmp/", "/dev/shm/", "wget", "curl", "reverse", + "bash -i", "nc ", "python", "base64"] + ) + if suspicious: + findings.append({ + "severity": "HIGH", + "file": svc_file, + "preview": content[:300], + }) + return findings + + +def check_ld_preload(evidence_root): + """Check for LD_PRELOAD rootkit indicators.""" + findings = [] + preload_path = os.path.join(evidence_root, "etc/ld.so.preload") + if os.path.exists(preload_path): + with open(preload_path, "r") as f: + content = f.read().strip() + if content: + findings.append({ + "severity": "CRITICAL", + "finding": f"/etc/ld.so.preload contains: {content}", + }) + return findings + + +def find_suid_binaries(evidence_root): + """Find SUID/SGID binaries (potential privilege escalation).""" + result = subprocess.run( + ["find", evidence_root, "-perm", "-4000", "-type", "f"], + capture_output=True, text=True, timeout=30 + ) + stdout = result.stdout.strip() + return stdout.splitlines() if result.returncode == 0 and stdout else [] + + +def find_suspicious_tmp_files(evidence_root): + """Find suspicious files in /tmp and /dev/shm.""" + findings = [] + for tmp_dir in ["tmp", "dev/shm"]: + full_path = os.path.join(evidence_root, tmp_dir) + if os.path.exists(full_path): + for root, dirs, files in os.walk(full_path): + for fname in files: + fpath = os.path.join(root, fname) + findings.append(fpath) + return findings + + +if __name__ == "__main__": + print("=" * 60) + print("Linux 
System Artifacts Forensics Agent") + print("User accounts, persistence, shell history, rootkit detection") + print("=" * 60) + + evidence_root = sys.argv[1] if len(sys.argv) > 1 else "/mnt/evidence" + + if os.path.exists(evidence_root): + print(f"\n[*] Examining evidence root: {evidence_root}") + + passwd_path = os.path.join(evidence_root, "etc/passwd") + if os.path.exists(passwd_path): + print("\n--- User Account Analysis ---") + for f in analyze_passwd(passwd_path): + print(f" [{f['severity']}] {f['finding']}") + + print("\n--- Cron Persistence ---") + cron = check_cron_persistence(evidence_root) + for c in cron: + print(f" [{c['severity']}] {c['source']}: {c['entry'][:80]}") + + print("\n--- SSH Authorized Keys ---") + ssh = check_ssh_keys(evidence_root) + for s in ssh: + print(f" {s['file']}: {s['key_count']} keys") + + print("\n--- Systemd Persistence ---") + systemd = check_systemd_persistence(evidence_root) + for s in systemd: + print(f" [{s['severity']}] {s['file']}") + + print("\n--- LD_PRELOAD Rootkit Check ---") + ld = check_ld_preload(evidence_root) + for l in ld: + print(f" [{l['severity']}] {l['finding']}") + + print("\n--- Suspicious Temp Files ---") + tmp = find_suspicious_tmp_files(evidence_root) + for t in tmp[:20]: + print(f" {t}") + else: + print(f"\n[DEMO] Usage: python agent.py ") + print("[*] Mount a forensic image and provide the path for analysis.") diff --git a/personas/_shared/skills/analyzing-lnk-file-and-jump-list-artifacts/LICENSE b/personas/_shared/skills/analyzing-lnk-file-and-jump-list-artifacts/LICENSE new file mode 100644 index 0000000..d885118 --- /dev/null +++ b/personas/_shared/skills/analyzing-lnk-file-and-jump-list-artifacts/LICENSE @@ -0,0 +1,201 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to the Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by the Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding any notices that do not + pertain to any part of the Derivative Works, in at least one + of the 
following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. Please do not remove or change + the license header comment from a contributed file except when + necessary. + + Copyright 2026 mukul975 + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/personas/_shared/skills/analyzing-lnk-file-and-jump-list-artifacts/SKILL.md b/personas/_shared/skills/analyzing-lnk-file-and-jump-list-artifacts/SKILL.md new file mode 100644 index 0000000..aced390 --- /dev/null +++ b/personas/_shared/skills/analyzing-lnk-file-and-jump-list-artifacts/SKILL.md @@ -0,0 +1,286 @@ +--- +name: analyzing-lnk-file-and-jump-list-artifacts +description: Analyze Windows LNK shortcut files and Jump List artifacts to establish evidence of file access, program execution, + and user activity using LECmd, JLECmd, and manual binary parsing of the Shell Link Binary format. 
+domain: cybersecurity +subdomain: digital-forensics +tags: +- lnk-files +- jump-lists +- lecmd +- jlecmd +- windows-forensics +- shell-link +- user-activity +- file-access +- program-execution +- recent-files +version: '1.0' +author: mahipal +license: Apache-2.0 +nist_csf: +- RS.AN-01 +- RS.AN-03 +- DE.AE-02 +- RS.MA-01 +--- + +# Analyzing LNK File and Jump List Artifacts + +## Overview + +Windows LNK (shortcut) files and Jump Lists are critical forensic artifacts that provide evidence of file access, program execution, and user behavior. LNK files are created automatically when a user opens a file through Windows Explorer or the Open/Save dialog, storing metadata about the target file including its original path, timestamps, volume serial number, NetBIOS name, and MAC address of the host system. Jump Lists, introduced in Windows 7, extend this by maintaining per-application lists of recently and frequently accessed files. These artifacts persist even after the target files are deleted, making them invaluable for establishing that a user accessed specific files at specific times. 
+ + +## When to Use + +- When investigating security incidents that require analyzing lnk file and jump list artifacts +- When building detection rules or threat hunting queries for this domain +- When SOC analysts need structured procedures for this analysis type +- When validating security monitoring coverage for related attack techniques + +## Prerequisites + +- LECmd (Eric Zimmerman) for LNK file parsing +- JLECmd (Eric Zimmerman) for Jump List parsing +- Python 3.8+ with pylnk3 or LnkParse3 libraries +- Forensic image or triage collection from Windows system +- Timeline Explorer for CSV analysis + +## LNK File Locations + +| Location | Description | +|----------|-------------| +| `%USERPROFILE%\AppData\Roaming\Microsoft\Windows\Recent\` | Recent files accessed | +| `%USERPROFILE%\Desktop\` | User-created shortcuts | +| `%USERPROFILE%\AppData\Roaming\Microsoft\Windows\Start Menu\` | Start Menu shortcuts | +| `%USERPROFILE%\AppData\Roaming\Microsoft\Office\Recent\` | Office recent documents | + +## LNK File Structure + +### Shell Link Header (76 bytes) + +| Offset | Size | Field | +|--------|------|-------| +| 0x00 | 4 | HeaderSize (always 0x0000004C) | +| 0x04 | 16 | LinkCLSID (always 00021401-0000-0000-C000-000000000046) | +| 0x14 | 4 | LinkFlags | +| 0x18 | 4 | FileAttributes | +| 0x1C | 8 | CreationTime (FILETIME) | +| 0x24 | 8 | AccessTime (FILETIME) | +| 0x2C | 8 | WriteTime (FILETIME) | +| 0x34 | 4 | FileSize of target | +| 0x38 | 4 | IconIndex | +| 0x3C | 4 | ShowCommand | +| 0x40 | 2 | HotKey | + +### Key Forensic Fields in LNK Files + +- **Target file timestamps**: Creation, access, modification times of the referenced file +- **Volume information**: Serial number, drive type, volume label +- **Network share information**: UNC path, share name +- **Machine identifiers**: NetBIOS name, MAC address (from TrackerDataBlock) +- **Distributed Link Tracking**: Machine ID and object GUID + +## Analysis with EZ Tools + +### LECmd - LNK File Parser + 
+```powershell +# Parse all LNK files in Recent folder +LECmd.exe -d "C:\Evidence\Users\suspect\AppData\Roaming\Microsoft\Windows\Recent" --csv C:\Output --csvf lnk_analysis.csv + +# Parse a single LNK file with full details +LECmd.exe -f "C:\Evidence\Users\suspect\Desktop\Confidential.docx.lnk" --json C:\Output + +# Parse LNK files with additional detail levels +LECmd.exe -d "C:\Evidence\Users\suspect\AppData\Roaming\Microsoft\Windows\Recent" --csv C:\Output --csvf lnk_all.csv --all +``` + +### JLECmd - Jump List Parser + +```powershell +# Parse Automatic Jump Lists +JLECmd.exe -d "C:\Evidence\Users\suspect\AppData\Roaming\Microsoft\Windows\Recent\AutomaticDestinations" --csv C:\Output --csvf jumplists_auto.csv + +# Parse Custom Jump Lists +JLECmd.exe -d "C:\Evidence\Users\suspect\AppData\Roaming\Microsoft\Windows\Recent\CustomDestinations" --csv C:\Output --csvf jumplists_custom.csv + +# Parse all jump lists with detailed output +JLECmd.exe -d "C:\Evidence\Users\suspect\AppData\Roaming\Microsoft\Windows\Recent\AutomaticDestinations" --csv C:\Output --csvf jumplists_auto.csv --ld +``` + +## Jump List Structure + +### Automatic Destinations (automaticDestinations-ms) + +These are OLE Compound files (Structured Storage) identified by AppID hash in the filename: + +| AppID Hash | Application | +|-----------|-------------| +| 5f7b5f1e01b83767 | Windows Explorer Pinned/Frequent | +| 1b4dd67f29cb1962 | Windows Explorer Recent | +| 9b9cdc69c1c24e2b | Notepad | +| a7bd71699cd38d1c | Notepad++ | +| 12dc1ea8e34b5a6 | Microsoft Paint | +| 7e4dca80246863e3 | Control Panel | +| 1cf97c38a5881255 | Microsoft Edge | +| f01b4d95cf55d32a | Windows Explorer | +| 9d1f905ce5044aee | Microsoft Excel | +| a4a5324453625195 | Microsoft Word | +| d00655d2aa12ff6d | Microsoft PowerPoint | +| bc03160ee1a59fc1 | Outlook | + +### Custom Destinations (customDestinations-ms) + +Created when users pin items to application jump lists. These files contain sequential LNK entries. 
+ +## Python Analysis Script + +```python +import struct +import os +from datetime import datetime, timedelta + +FILETIME_EPOCH = datetime(1601, 1, 1) + +def filetime_to_datetime(filetime_bytes: bytes) -> datetime: + """Convert Windows FILETIME (100-ns intervals since 1601) to datetime.""" + ft = struct.unpack(" dict: + """Parse the Shell Link header from an LNK file.""" + with open(lnk_path, "rb") as f: + header = f.read(76) + + header_size = struct.unpack("