# Source: keyhunter/pkg/dorks/definitions/shodan/infrastructure.yaml
# Commit 56c11e39a0 — feat(08-03): add 20 Shodan dorks for exposed LLM infrastructure
#   - frontier.yaml: 6 dorks (OpenAI/Anthropic proxies, Azure OpenAI certs, AWS Bedrock, LiteLLM)
#   - infrastructure.yaml: 14 dorks (Ollama, vLLM, LocalAI, LM Studio, text-generation-webui,
#     Open WebUI, Triton, TGI, LangServe, FastChat, OpenRouter/Portkey/Helicone gateways)
#   - Real Shodan query syntax: http.title, http.html, ssl.cert.subject.cn, product, port, http.component
#   - Dual-located: pkg/dorks/definitions/shodan/ + dorks/shodan/
#   - Authored 2026-04-06 00:21:03 +03:00 (99 lines, 3.4 KiB, YAML)
---
# Banner-level match: Shodan's product fingerprint plus the default Ollama
# API port (11434) — no HTML-content filter needed.
- id: shodan-ollama-default
  name: "Ollama on default port 11434"
  source: shodan
  category: infrastructure
  query: 'product:"Ollama" port:11434'
  description: "Ollama servers banner-identified on the default port"
  # tier8 presumably encodes a priority/severity tier — confirm against the tag taxonomy
  tags: [ollama, self-hosted, tier8]
# Content-level match: page body references the /api/tags model-listing
# route AND the page title identifies Ollama.
- id: shodan-ollama-tags
  name: "Ollama /api/tags endpoints"
  source: shodan
  category: infrastructure
  query: 'http.html:"/api/tags" http.title:"Ollama"'
  description: "Ollama servers exposing the model tags listing endpoint"
  tags: [ollama, self-hosted, tier8]
# Both terms scoped to the HTML body: the vLLM branding string plus its
# OpenAI-compatible /v1/models route.
- id: shodan-vllm
  name: "vLLM /v1/models endpoints"
  source: shodan
  category: infrastructure
  query: 'http.html:"vLLM" http.html:"/v1/models"'
  description: "vLLM inference servers exposing the models endpoint"
  tags: [vllm, self-hosted, tier8]
# Title-only match — broad by design; may need narrowing if it produces
# false positives on unrelated pages titled "LocalAI".
- id: shodan-localai
  name: "LocalAI dashboards"
  source: shodan
  category: infrastructure
  query: 'http.title:"LocalAI"'
  description: "LocalAI self-hosted inference dashboards"
  tags: [localai, self-hosted, tier8]
# Title-only match for LM Studio's local inference server UI.
- id: shodan-lmstudio
  name: "LM Studio servers"
  source: shodan
  category: infrastructure
  query: 'http.title:"LM Studio"'
  description: "Exposed LM Studio local inference servers"
  tags: [lmstudio, self-hosted, tier8]
# Title-only match for oobabooga's text-generation-webui front-end.
- id: shodan-textgenwebui
  name: "text-generation-webui instances"
  source: shodan
  category: infrastructure
  query: 'http.title:"text-generation-webui"'
  description: "Exposed oobabooga text-generation-webui instances"
  tags: [textgen, self-hosted, tier8]
# Title match narrowed by an HTML-body reference to the /api/chat route,
# to cut pages that merely mention "Open WebUI".
- id: shodan-openwebui
  name: "Open WebUI chat servers"
  source: shodan
  category: infrastructure
  query: 'http.title:"Open WebUI" http.html:"/api/chat"'
  description: "Exposed Open WebUI chat front-ends"
  tags: [openwebui, self-hosted, tier8]
# HTML-body reference to openrouter.ai, limited to port 443 (HTTPS hosts).
# Note: this finds any page mentioning the domain, not confirmed proxies.
- id: shodan-openrouter-proxy
  name: "OpenRouter-linked proxies"
  source: shodan
  category: infrastructure
  query: 'http.html:"openrouter.ai" port:443'
  description: "HTTPS hosts referencing openrouter.ai in page content"
  # tier5 — lower tier than the self-hosted inference dorks above
  tags: [openrouter, gateway, tier5]
# Title-only match — broad; "Portkey" may collide with unrelated products.
- id: shodan-portkey-gateway
  name: "Portkey gateway dashboards"
  source: shodan
  category: infrastructure
  query: 'http.title:"Portkey"'
  description: "Exposed Portkey AI gateway dashboards"
  tags: [portkey, gateway, tier5]
# Both terms scoped to the HTML body: the Helicone branding string plus a
# /v1 API path reference.
- id: shodan-helicone-gateway
  name: "Helicone gateway endpoints"
  source: shodan
  category: infrastructure
  query: 'http.html:"helicone" http.html:"/v1"'
  description: "Hosts referencing Helicone observability gateway endpoints"
  tags: [helicone, gateway, tier5]
# Both terms scoped to the HTML body: Triton branding plus its KServe-style
# /v2/models route.
- id: shodan-triton-server
  name: "NVIDIA Triton inference servers"
  source: shodan
  category: infrastructure
  query: 'http.html:"NVIDIA Triton" http.html:"/v2/models"'
  description: "Exposed NVIDIA Triton inference servers"
  tags: [triton, nvidia, tier8]
# Hugging Face text-generation-inference (TGI) servers exposing /generate.
# FIX: the original query paired http.html:"text-generation-inference" with
# a bare "/generate" term; an unscoped term is a free-text banner search on
# Shodan, not an HTML-body filter. Scoped it to http.html: to match every
# other multi-term dork in this file (shodan-vllm, shodan-triton-server,
# shodan-helicone-gateway).
- id: shodan-tgi-hf
  name: "HF text-generation-inference servers"
  source: shodan
  category: infrastructure
  query: 'http.html:"text-generation-inference" http.html:"/generate"'
  description: "Hugging Face text-generation-inference servers exposing /generate"
  tags: [huggingface, tgi, tier8]
# Title-only match for LangChain's LangServe deployment UI.
- id: shodan-langserve
  name: "LangServe endpoints"
  source: shodan
  category: infrastructure
  query: 'http.title:"LangServe"'
  description: "Exposed LangChain LangServe deployments"
  tags: [langserve, tier8]
# Title-only match for FastChat's multi-model serving front-end.
- id: shodan-fastchat
  name: "FastChat servers"
  source: shodan
  category: infrastructure
  query: 'http.title:"FastChat"'
  description: "Exposed FastChat multi-model serving instances"
  tags: [fastchat, self-hosted, tier8]