Files
keyhunter/pkg/recon/sources/httpclient.go
salvacybersec 75024e4701 feat(10-01): add shared retry HTTP client for recon sources
- Client.Do retries 429/403/5xx honoring Retry-After
- 401 returns ErrUnauthorized immediately (no retry)
- Context cancellation honored during retry sleeps
- Default UA keyhunter-recon/1.0, 30s timeout, 2 retries
2026-04-06 01:09:02 +03:00

109 lines
2.8 KiB
Go

package sources
import (
"context"
"errors"
"fmt"
"io"
"net/http"
"strconv"
"time"
)
// ErrUnauthorized is returned when an API rejects credentials (401).
// Callers match it with errors.Is; Do wraps it with the response body text.
var ErrUnauthorized = errors.New("sources: unauthorized (check credentials)")

// Client is the shared retry wrapper every Phase 10 source uses.
//
// It handles 429/403/5xx retries honoring Retry-After, context cancellation
// during backoff, and wraps 401 responses in ErrUnauthorized. Rate limiting
// is intentionally out of scope — callers call recon.LimiterRegistry.Wait
// before Do to keep this type single-purpose.
//
// The zero value is not usable (nil HTTP field); construct with NewClient
// or populate every field explicitly.
type Client struct {
	HTTP       *http.Client // underlying client; NewClient sets a 30s timeout
	MaxRetries int          // extra attempts after the first; default 2 (set by NewClient, not the zero value)
	UserAgent  string       // default "keyhunter-recon/1.0"; applied only when the request carries no User-Agent
}
// NewClient constructs a Client with the package defaults: a 30-second
// HTTP timeout, two retries, and the keyhunter-recon user agent.
func NewClient() *Client {
	var c Client
	c.HTTP = &http.Client{Timeout: 30 * time.Second}
	c.MaxRetries = 2
	c.UserAgent = "keyhunter-recon/1.0"
	return &c
}
// Do executes req with retries on 429/403/5xx, sleeping between attempts
// for the duration indicated by the Retry-After header (via ParseRetryAfter).
//
// Any 2xx response is returned to the caller, who owns closing its body.
// A 401 short-circuits with ErrUnauthorized wrapped around the response
// body — retrying would re-send the same credentials. Context cancellation
// is honored both during the request itself and during retry sleeps.
//
// Requests with a body are only safely retried when req.GetBody is set
// (http.NewRequest sets it for common body types); each retry then
// re-sends a fresh copy of the payload.
func (c *Client) Do(ctx context.Context, req *http.Request) (*http.Response, error) {
	if req.Header.Get("User-Agent") == "" {
		req.Header.Set("User-Agent", c.UserAgent)
	}
	var lastStatus int
	var lastBody string
	for attempt := 0; attempt <= c.MaxRetries; attempt++ {
		if attempt > 0 && req.GetBody != nil {
			// The previous attempt consumed req.Body; rewind it so this
			// retry sends the full payload again instead of an empty one.
			body, err := req.GetBody()
			if err != nil {
				return nil, fmt.Errorf("sources http: rewind body: %w", err)
			}
			req.Body = body
		}
		r, err := c.HTTP.Do(req.WithContext(ctx))
		if err != nil {
			return nil, fmt.Errorf("sources http: %w", err)
		}
		// Treat the whole 2xx range as success (201 Created, 204 No
		// Content, ...), not just 200.
		if r.StatusCode >= 200 && r.StatusCode < 300 {
			return r, nil
		}
		if r.StatusCode == http.StatusUnauthorized {
			body := readBody(r)
			return nil, fmt.Errorf("%w: %s", ErrUnauthorized, body)
		}
		retriable := r.StatusCode == http.StatusTooManyRequests ||
			r.StatusCode == http.StatusForbidden ||
			r.StatusCode >= 500
		if !retriable || attempt == c.MaxRetries {
			body := readBody(r)
			return nil, fmt.Errorf("sources http %d: %s", r.StatusCode, body)
		}
		sleep := ParseRetryAfter(r.Header.Get("Retry-After"))
		lastStatus = r.StatusCode
		lastBody = readBody(r) // readBody also closes the response body
		select {
		case <-time.After(sleep):
		case <-ctx.Done():
			return nil, ctx.Err()
		}
	}
	// Unreachable: the loop always returns on its final attempt, but the
	// compiler requires a terminating statement.
	return nil, fmt.Errorf("sources http: retries exhausted (last %d: %s)", lastStatus, lastBody)
}
// ParseRetryAfter decodes integer-seconds Retry-After, defaulting to 1s when
// the header is missing, unparseable, or zero.
func ParseRetryAfter(v string) time.Duration {
if v == "" {
return 1 * time.Second
}
n, err := strconv.Atoi(v)
if err != nil || n <= 0 {
return 1 * time.Second
}
return time.Duration(n) * time.Second
}
// readBody reads up to 4KB of the response body and closes it.
func readBody(r *http.Response) string {
if r.Body == nil {
return ""
}
defer r.Body.Close()
b, err := io.ReadAll(io.LimitReader(r.Body, 4096))
if err != nil {
return ""
}
return string(b)
}