Potential prompt injection attack in body HTML
Detects messages containing references to major AI tools (like Gemini, Copilot, ChatGPT, or Claude) in non-standard HTML elements.
Sublime rule (View on GitHub)
# Sublime Security MQL rule: flags inbound mail whose HTML body hides
# AI-assistant bait text (Gemini/Copilot/ChatGPT/Claude) inside a
# non-standard <admin> element — a common prompt-injection vector.
name: "Potential prompt injection attack in body HTML"
description: "Detects messages containing references to major AI tools (like Gemini, Copilot, ChatGPT, or Claude) in non-standard HTML elements."
type: "rule"
severity: "high"
source: |
  type.inbound
  // match any <admin> element (regardless of namespace) whose visible text
  // name-drops a major AI assistant
  and length(filter(html.xpath(body.html, "//*[local-name() = 'admin']").nodes,
                    length(.display_text) > 0
                    and strings.ilike(.display_text,
                                      '*gemini*',
                                      '*copilot*',
                                      '*chatgpt*',
                                      '*claude*'
                    )
             )
  ) > 0

  // negate highly trusted sender domains unless they fail DMARC authentication
  and (
    (
      sender.email.domain.root_domain in $high_trust_sender_root_domains
      and not headers.auth_summary.dmarc.pass
    )
    or sender.email.domain.root_domain not in $high_trust_sender_root_domains
  )
attack_types:
  - "Callback Phishing"
  - "Credential Phishing"
  - "Extortion"
  - "Malware/Ransomware"
  - "Spam"
  - "BEC/Fraud"
tactics_and_techniques:
  - "Evasion"
  - "Social engineering"
detection_methods:
  - "Header analysis"
  - "HTML analysis"
  - "Content analysis"
id: "5fb24736-df8a-5a3a-84da-a2d5560d73d1"