feat: major expansion — 3 new variants, enhanced build system, platform auto-install
New persona variants: - forge/frontend-design — DESIGN.md methodology, 58-brand reference, UI/UX intelligence - oracle/source-verification — 5-section forensic verification protocol (ethos/pathos/context/intent/logos) - sentinel/c2-hunting — 6-phase C2 hunting with beaconing detection, detection engineering Enhanced existing personas: - neo: Added Active Directory exploitation (Kerberoasting, DCSync, delegation), network pivoting, cloud attacks - frodo: Added response mode auto-detection, claim extraction, Devil's Advocate, explicit uncertainty tracking - ghost: Added cognitive warfare expertise (behavioral science weaponization, algorithmic amplification) Build system enhancements: - Cross-persona escalation graph auto-extracted → generated/_index/escalation_graph.json - Trigger→persona routing index → generated/_index/trigger_index.json - Quality validation with warnings for thin/missing sections - Section word counts injected into every output - Richer CATALOG.md with depth stats, escalation paths, trigger index Platform auto-install: - python3 build.py --install claude — 111 slash commands → ~/.claude/commands/ - python3 build.py --install antigravity — personas → ~/.config/antigravity/personas/ - python3 build.py --install gemini — Gems → generated/_gems/ - python3 build.py --install openclaw — IDENTITY.md + personas → generated/_openclaw/ - python3 build.py --install all — deploy to all platforms Shared reference library: - personas/_shared/kali-tools/ — 16 Kali Linux tool reference docs - personas/_shared/osint-sources/ — OSINT master reference - personas/_shared/ad-attack-tools/ — AD attack chain reference Stats: 29 personas, 111 variants, 59,712 words Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
289
build.py
289
build.py
@@ -133,7 +133,7 @@ def parse_persona_md(filepath: Path, flat_config: dict) -> dict:
|
||||
}
|
||||
|
||||
|
||||
def build_persona(persona_dir: Path, output_dir: Path, flat_config: dict, config: dict):
|
||||
def build_persona(persona_dir: Path, output_dir: Path, flat_config: dict, config: dict, escalation_graph: dict = None):
|
||||
"""Build all variants for a persona directory."""
|
||||
md_files = sorted(persona_dir.glob("*.md"))
|
||||
if not md_files:
|
||||
@@ -179,6 +179,17 @@ def build_persona(persona_dir: Path, output_dir: Path, flat_config: dict, config
|
||||
"regional_focus": config.get("regional_focus", {}),
|
||||
}
|
||||
|
||||
# Inject escalation graph for this persona
|
||||
if escalation_graph and persona_name in escalation_graph:
|
||||
output["escalates_to"] = escalation_graph[persona_name]
|
||||
|
||||
# Inject section word counts for quality tracking
|
||||
output["_stats"] = {
|
||||
"total_words": sum(len(s.split()) for s in parsed["sections"].values()),
|
||||
"sections": list(parsed["sections"].keys()),
|
||||
"section_count": len(parsed["sections"]),
|
||||
}
|
||||
|
||||
# Write YAML
|
||||
yaml_out = out_path / f"{variant}.yaml"
|
||||
yaml_out.write_text(
|
||||
@@ -200,14 +211,78 @@ def build_persona(persona_dir: Path, output_dir: Path, flat_config: dict, config
|
||||
return count
|
||||
|
||||
|
||||
def build_catalog(personas_dir: Path, output_dir: Path, config: dict):
|
||||
"""Generate CATALOG.md from all personas."""
|
||||
def build_escalation_graph(personas_dir: Path, flat_config: dict) -> dict:
    """Extract cross-persona escalation paths from Boundaries sections.

    Parses each persona's ``general.md`` and collects every target written
    as ``Escalate to **name**`` inside its "boundaries" section.

    Returns a mapping of persona directory name -> list of lowercased
    escalation target codenames (empty list when none are declared).
    """
    escalate_pattern = re.compile(r"Escalate to \*\*(\w+)\*\*")
    graph = {}
    # Only real persona directories count; dot- and underscore-prefixed
    # entries (e.g. personas/_shared) are infrastructure.
    persona_dirs = (
        d for d in sorted(personas_dir.iterdir())
        if d.is_dir() and not d.name.startswith((".", "_"))
    )
    for persona_dir in persona_dirs:
        general_md = persona_dir / "general.md"
        if not general_md.exists():
            continue
        parsed = parse_persona_md(general_md, flat_config)
        if not parsed:
            continue
        boundaries_text = parsed["sections"].get("boundaries", "")
        graph[persona_dir.name] = [
            target.lower() for target in escalate_pattern.findall(boundaries_text)
        ]
    return graph
|
||||
|
||||
|
||||
def build_trigger_index(personas_dir: Path) -> dict:
    """Build reverse index: trigger keyword → persona codenames for multi-agent routing.

    Reads each persona's ``_meta.yaml`` and inverts its
    ``activation_triggers`` list into a mapping of lowercased trigger
    keyword -> list of persona directory names.

    Robustness fixes over the original:
    - ``activation_triggers: null`` in YAML made ``meta.get(...)`` return
      ``None`` and crashed the loop; ``or []`` now treats it as empty.
    - Non-string triggers (e.g. a bare number in YAML) crashed on
      ``.lower()``; they are coerced with ``str()`` first.
    """
    index: dict = {}
    for persona_dir in sorted(personas_dir.iterdir()):
        # Skip files and internal dirs such as personas/_shared.
        if not persona_dir.is_dir() or persona_dir.name.startswith((".", "_")):
            continue
        meta_file = persona_dir / "_meta.yaml"
        if not meta_file.exists():
            continue
        # An empty file parses to None; normalize to an empty mapping.
        meta = yaml.safe_load(meta_file.read_text(encoding="utf-8")) or {}
        for trigger in meta.get("activation_triggers", []) or []:
            index.setdefault(str(trigger).lower(), []).append(persona_dir.name)
    return index
|
||||
|
||||
|
||||
def validate_persona(persona_name: str, parsed: dict) -> list:
    """Validate persona structure and return a list of warning strings.

    Checks that the four core sections exist and carry at least 30 words,
    and that the required frontmatter fields are present. An empty list
    means the persona passed validation.

    Note: ``persona_name`` is part of the call signature but is not
    embedded in the warnings; callers prefix it themselves.
    """
    warnings = []

    # Hoist the lookup once instead of re-indexing parsed["sections"]
    # (and re-splitting the text) for every check.
    sections = parsed.get("sections", {})
    for section in ("soul", "expertise", "methodology", "boundaries"):
        if section not in sections:
            warnings.append(f"Missing section: {section}")
        else:
            word_count = len(sections[section].split())
            if word_count < 30:
                warnings.append(f"Thin section ({word_count} words): {section}")

    fm = parsed.get("metadata", {})
    for field in ("codename", "name", "domain", "address_to", "tone"):
        if field not in fm:
            warnings.append(f"Missing frontmatter: {field}")

    return warnings
|
||||
|
||||
|
||||
def build_catalog(personas_dir: Path, output_dir: Path, config: dict, flat_config: dict):
|
||||
"""Generate CATALOG.md with stats, escalation paths, and trigger index."""
|
||||
addresses = config.get("persona_defaults", {}).get("custom_addresses", {})
|
||||
|
||||
# Build escalation graph and trigger index
|
||||
escalation_graph = build_escalation_graph(personas_dir, flat_config)
|
||||
trigger_index = build_trigger_index(personas_dir)
|
||||
|
||||
catalog_lines = [
|
||||
"# Persona Catalog\n",
|
||||
f"_Auto-generated by build.py | User: {config.get('user', {}).get('name', 'default')}_\n",
|
||||
]
|
||||
|
||||
total_words = 0
|
||||
total_sections = 0
|
||||
all_warnings = []
|
||||
|
||||
for persona_dir in sorted(personas_dir.iterdir()):
|
||||
if not persona_dir.is_dir() or persona_dir.name.startswith((".", "_")):
|
||||
continue
|
||||
@@ -221,24 +296,86 @@ def build_catalog(personas_dir: Path, output_dir: Path, config: dict):
|
||||
address = addresses.get(persona_dir.name, meta.get("address_to", "N/A"))
|
||||
variants = [f.stem for f in sorted(persona_dir.glob("*.md")) if not f.name.startswith("_")]
|
||||
|
||||
# Parse general.md for stats
|
||||
general = persona_dir / "general.md"
|
||||
word_count = 0
|
||||
section_count = 0
|
||||
if general.exists():
|
||||
parsed = parse_persona_md(general, flat_config)
|
||||
if parsed:
|
||||
for s in parsed["sections"].values():
|
||||
word_count += len(s.split())
|
||||
section_count = len(parsed["sections"])
|
||||
# Validate
|
||||
warns = validate_persona(codename, parsed)
|
||||
for w in warns:
|
||||
all_warnings.append(f" {codename}: {w}")
|
||||
|
||||
total_words += word_count
|
||||
total_sections += section_count
|
||||
escalates_to = escalation_graph.get(persona_dir.name, [])
|
||||
|
||||
catalog_lines.append(f"## {codename} — {meta.get('role', 'Unknown')}")
|
||||
catalog_lines.append(f"- **Domain:** {meta.get('domain', 'N/A')}")
|
||||
catalog_lines.append(f"- **Hitap:** {address}")
|
||||
catalog_lines.append(f"- **Variants:** {', '.join(variants)}")
|
||||
catalog_lines.append(f"- **Depth:** {word_count:,} words, {section_count} sections")
|
||||
if escalates_to:
|
||||
catalog_lines.append(f"- **Escalates to:** {', '.join(escalates_to)}")
|
||||
catalog_lines.append("")
|
||||
|
||||
# Add trigger index section
|
||||
catalog_lines.append("---\n")
|
||||
catalog_lines.append("## Activation Trigger Index\n")
|
||||
catalog_lines.append("_Keyword → persona routing for multi-agent systems_\n")
|
||||
for trigger in sorted(trigger_index.keys()):
|
||||
personas = ", ".join(trigger_index[trigger])
|
||||
catalog_lines.append(f"- **{trigger}** → {personas}")
|
||||
catalog_lines.append("")
|
||||
|
||||
# Add stats
|
||||
catalog_lines.append("---\n")
|
||||
catalog_lines.append("## Build Statistics\n")
|
||||
catalog_lines.append(f"- Total prompt content: {total_words:,} words")
|
||||
catalog_lines.append(f"- Total sections: {total_sections}")
|
||||
catalog_lines.append(f"- Escalation connections: {sum(len(v) for v in escalation_graph.values())}")
|
||||
catalog_lines.append(f"- Unique triggers: {len(trigger_index)}")
|
||||
catalog_lines.append("")
|
||||
|
||||
catalog_path = personas_dir / "CATALOG.md"
|
||||
catalog_path.write_text("\n".join(catalog_lines), encoding="utf-8")
|
||||
print(f" Catalog: {catalog_path}")
|
||||
|
||||
# Write escalation graph and trigger index as JSON for API consumers
|
||||
index_path = output_dir / "_index"
|
||||
index_path.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
def print_summary(config: dict, total_personas: int, total_variants: int):
|
||||
(index_path / "escalation_graph.json").write_text(
|
||||
json.dumps(escalation_graph, indent=2, ensure_ascii=False), encoding="utf-8"
|
||||
)
|
||||
(index_path / "trigger_index.json").write_text(
|
||||
json.dumps(trigger_index, indent=2, ensure_ascii=False), encoding="utf-8"
|
||||
)
|
||||
print(f" Index: {index_path}/escalation_graph.json, trigger_index.json")
|
||||
|
||||
# Print validation warnings
|
||||
if all_warnings:
|
||||
print(f"\n WARNINGS ({len(all_warnings)}):")
|
||||
for w in all_warnings:
|
||||
print(f" {w}")
|
||||
|
||||
return total_words
|
||||
|
||||
|
||||
def print_summary(config: dict, total_personas: int, total_variants: int, total_words: int = 0):
|
||||
"""Print build summary with config status."""
|
||||
print("\n" + "=" * 50)
|
||||
print(f"BUILD COMPLETE")
|
||||
print(f" Personas: {total_personas}")
|
||||
print(f" Variants: {total_variants}")
|
||||
print(f" Output: generated/")
|
||||
print(f" Personas: {total_personas}")
|
||||
print(f" Variants: {total_variants}")
|
||||
print(f" Words: {total_words:,}")
|
||||
print(f" Output: generated/")
|
||||
print(f" Index: generated/_index/")
|
||||
|
||||
if config:
|
||||
user = config.get("user", {}).get("name", "?")
|
||||
@@ -256,7 +393,121 @@ def print_summary(config: dict, total_personas: int, total_variants: int):
|
||||
print("=" * 50)
|
||||
|
||||
|
||||
def install_claude(output_dir: Path):
    """Install personas to Claude Code as slash commands (~/.claude/commands/).

    Each generated ``<variant>.prompt.md`` becomes a command file named
    ``persona-<codename>.md`` (for the general variant) or
    ``persona-<codename>-<variant>.md``, with a ``$ARGUMENTS`` placeholder
    appended so Claude injects the user query.

    Returns the number of command files written.
    """
    commands_dir = Path.home() / ".claude" / "commands"
    commands_dir.mkdir(parents=True, exist_ok=True)
    count = 0
    for persona_dir in sorted(output_dir.iterdir()):
        if not persona_dir.is_dir() or persona_dir.name.startswith("_"):
            continue
        for prompt_file in persona_dir.glob("*.prompt.md"):
            # BUG FIX: Path("x.prompt.md").stem is "x.prompt", so the
            # `variant == "general"` branch never fired and every command
            # carried a spurious "-general.prompt" suffix. Strip the full
            # ".prompt.md" suffix instead.
            variant = prompt_file.name.removesuffix(".prompt.md")
            codename = persona_dir.name
            cmd_name = f"persona-{codename}" if variant == "general" else f"persona-{codename}-{variant}"
            dest = commands_dir / f"{cmd_name}.md"
            content = prompt_file.read_text(encoding="utf-8")
            # Wrap as Claude command: $ARGUMENTS placeholder for user query
            command_content = f"{content}\n\n---\nUser query: $ARGUMENTS\n"
            dest.write_text(command_content, encoding="utf-8")
            count += 1
    print(f" Claude: {count} commands installed to {commands_dir}")
    return count
|
||||
|
||||
|
||||
def install_antigravity(output_dir: Path):
    """Install personas to Antigravity IDE system prompts.

    Copies every generated ``<variant>.prompt.md`` to
    ``~/.config/antigravity/personas/<codename>/<variant>.md``.

    Returns the number of files installed.
    """
    # Antigravity stores system prompts in ~/.config/antigravity/prompts/ or project .antigravity/
    ag_dir = Path.home() / ".config" / "antigravity" / "personas"
    ag_dir.mkdir(parents=True, exist_ok=True)
    count = 0
    for persona_dir in sorted(output_dir.iterdir()):
        if not persona_dir.is_dir() or persona_dir.name.startswith("_"):
            continue
        for prompt_file in persona_dir.glob("*.prompt.md"):
            # BUG FIX: Path("x.prompt.md").stem is "x.prompt", so files were
            # written as "<variant>.prompt.md" instead of "<variant>.md".
            variant = prompt_file.name.removesuffix(".prompt.md")
            codename = persona_dir.name
            dest = ag_dir / codename / f"{variant}.md"
            dest.parent.mkdir(parents=True, exist_ok=True)
            dest.write_text(prompt_file.read_text(encoding="utf-8"), encoding="utf-8")
            count += 1
    print(f" Antigravity: {count} personas installed to {ag_dir}")
    return count
|
||||
|
||||
|
||||
def install_gemini(output_dir: Path):
    """Install personas as Gemini Gems (JSON format for Google AI Studio).

    Reads every generated ``<variant>.json`` under each persona directory
    and emits a Gem definition to ``<output_dir>/_gems/<codename>-<variant>.json``.

    Returns the number of gems written.
    """
    gems_dir = output_dir / "_gems"
    gems_dir.mkdir(parents=True, exist_ok=True)
    written = 0
    for persona_dir in sorted(output_dir.iterdir()):
        # Skip plain files and internal dirs (including _gems itself).
        if not persona_dir.is_dir() or persona_dir.name.startswith("_"):
            continue
        for json_file in persona_dir.glob("*.json"):
            data = json.loads(json_file.read_text(encoding="utf-8"))
            variant = data.get("variant", json_file.stem)
            codename = data.get("codename", persona_dir.name)
            display_name = data.get("name", codename.title())
            sections = data.get("sections", {})
            # Core system prompt: the four narrative sections, blank-line separated.
            system_instruction = "\n\n".join(
                sections.get(key, "")
                for key in ("soul", "expertise", "methodology", "behavior_rules")
            )
            gem = {
                "name": display_name if variant == "general" else f"{display_name} — {variant}",
                "description": f"{data.get('role', '')} | {data.get('domain', '')}",
                "system_instruction": system_instruction,
                "metadata": {
                    "codename": codename,
                    "variant": variant,
                    "domain": data.get("domain", ""),
                    "address_to": data.get("address_to", ""),
                    "tone": data.get("tone", ""),
                    "activation_triggers": data.get("activation_triggers", []),
                },
            }
            target = gems_dir / f"{codename}-{variant}.json"
            target.write_text(json.dumps(gem, ensure_ascii=False, indent=2), encoding="utf-8")
            written += 1
    print(f" Gemini: {written} gems generated to {gems_dir}")
    return written
|
||||
|
||||
|
||||
def install_openclaw(output_dir: Path):
    """Install personas to OpenClaw format (IDENTITY.md + individual persona files).

    For each persona with a ``general.prompt.md``, writes
    ``_openclaw/personas/<codename>.md`` and adds a section to the
    aggregated ``_openclaw/IDENTITY.md``.

    Returns the number of personas installed.
    """
    oc_dir = output_dir / "_openclaw"
    personas_out = oc_dir / "personas"
    # parents=True creates oc_dir and personas_out in one call.
    personas_out.mkdir(parents=True, exist_ok=True)
    installed = 0
    identity_sections = []
    for persona_dir in sorted(output_dir.iterdir()):
        if not persona_dir.is_dir() or persona_dir.name.startswith("_"):
            continue
        general_prompt = persona_dir / "general.prompt.md"
        if not general_prompt.exists():
            continue
        codename = persona_dir.name
        content = general_prompt.read_text(encoding="utf-8")
        # One standalone file per persona.
        (personas_out / f"{codename}.md").write_text(content, encoding="utf-8")
        # First line (minus markdown heading markers) becomes the index title.
        title = content.split("\n")[0].strip("# ").strip()
        identity_sections.append(f"### {title}\nSee: personas/{codename}.md\n")
        installed += 1
    # Aggregate index document.
    identity_doc = "# IDENTITY — Persona Definitions\n\n" + "\n".join(identity_sections)
    (oc_dir / "IDENTITY.md").write_text(identity_doc, encoding="utf-8")
    print(f" OpenClaw: {installed} personas + IDENTITY.md to {oc_dir}")
    return installed
|
||||
|
||||
|
||||
def main():
|
||||
import argparse
|
||||
parser = argparse.ArgumentParser(description="Build persona library and optionally install to platforms.")
|
||||
parser.add_argument("--install", choices=["claude", "antigravity", "gemini", "openclaw", "all"],
|
||||
help="Install generated personas to a target platform")
|
||||
args = parser.parse_args()
|
||||
|
||||
root = Path(__file__).parent
|
||||
personas_dir = root / "personas"
|
||||
|
||||
@@ -282,12 +533,30 @@ def main():
|
||||
output_dir.mkdir(parents=True, exist_ok=True)
|
||||
print(f"Building {len(persona_dirs)} personas -> {output_dir}\n")
|
||||
|
||||
# Pre-build escalation graph for cross-persona injection
|
||||
escalation_graph = build_escalation_graph(personas_dir, flat_config)
|
||||
|
||||
total_variants = 0
|
||||
for pdir in persona_dirs:
|
||||
total_variants += build_persona(pdir, output_dir, flat_config, config)
|
||||
total_variants += build_persona(pdir, output_dir, flat_config, config, escalation_graph)
|
||||
|
||||
build_catalog(personas_dir, output_dir, config)
|
||||
print_summary(config, len(persona_dirs), total_variants)
|
||||
total_words = build_catalog(personas_dir, output_dir, config, flat_config)
|
||||
|
||||
# Platform installation
|
||||
if args.install:
|
||||
print(f"\n--- Installing to: {args.install} ---\n")
|
||||
targets = ["claude", "antigravity", "gemini", "openclaw"] if args.install == "all" else [args.install]
|
||||
for target in targets:
|
||||
if target == "claude":
|
||||
install_claude(output_dir)
|
||||
elif target == "antigravity":
|
||||
install_antigravity(output_dir)
|
||||
elif target == "gemini":
|
||||
install_gemini(output_dir)
|
||||
elif target == "openclaw":
|
||||
install_openclaw(output_dir)
|
||||
|
||||
print_summary(config, len(persona_dirs), total_variants, total_words)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
|
||||
Reference in New Issue
Block a user