The Wulf's Den

Gemma 3: no wait / ChatGPT-4o-Latest: no wait / GPT-4o Mini / 3.5 Turbo: no wait / GPT-4: no wait / GPT-4 Turbo: no wait / GPT-4o: no wait / Gemini Flash: no wait / Gemini Pro: no wait / Mistral 7B: no wait / Mistral Nemo: no wait / Mistral Medium: no wait / Mistral Large: no wait / DeepSeek: no wait / OpenAI o3 mini: no wait / Grok: no wait / OpenAI GPT-4.1: no wait / OpenAI GPT-4.1-Mini: no wait / OpenAI GPT-4.1-Nano: no wait / OpenAI o4 mini: no wait

Welcome to The Wulf's Den

Small disclaimer: streaming does not currently work well with Gemini, so if you experience any issues or errors, please disable streaming. Context handling also seems partly broken on Gemini; anything over roughly 30k tokens of context may return errors, and there is no fix for this yet. Streaming should be perfectly fine with everything else.
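
If your client or script lets you toggle streaming per request, the sketch below shows the idea: a chat completion call through the Gemini endpoint with streaming turned off and a modest prompt size. This is a minimal sketch, not the proxy's documented API; the request path, model id, and response shape are assumptions based on typical OpenAI-compatible proxies, so adjust them to whatever your frontend actually sends, and keep the total context under roughly 30k tokens.

import requests

# Assumed values: the exact path under /proxy/google-ai and the model id may differ.
PROXY_URL = "https://wulfs-den.ink/proxy/google-ai/chat/completions"
USER_TOKEN = "YOUR_USER_TOKEN"  # gatekeeper is user_token, so your token acts as the API key

payload = {
    "model": "gemini-pro",  # placeholder model id
    "stream": False,        # keep streaming disabled for Gemini, per the disclaimer above
    "max_tokens": 1024,
    "messages": [{"role": "user", "content": "Hello from The Wulf's Den."}],
}

resp = requests.post(
    PROXY_URL,
    headers={"Authorization": f"Bearer {USER_TOKEN}"},
    json=payload,
    timeout=120,
)
resp.raise_for_status()
# Assumes an OpenAI-style response body.
print(resp.json()["choices"][0]["message"]["content"])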


Service Info

{
  "uptime": 82525,
  "endpoints": {
    "xai": "https://wulfs-den.ink/proxy/xai",
    "openai": "https://wulfs-den.ink/proxy/openai",
    "google-ai": "https://wulfs-den.ink/proxy/google-ai",
    "mistral-ai": "https://wulfs-den.ink/proxy/mistral-ai",
    "deepseek": "https://wulfs-den.ink/proxy/deepseek",
    "universal": "https://wulfs-den.ink/proxy"
  },
  "proompts": 14600,
  "tookens": "275.05m",
  "proomptsTotal": 2659038,
  "proomptersNow": 2,
  "tookensTotal": "52.244b",
  "xaiKeys": 5,
  "openaiKeys": 10,
  "openaiOrgs": 10,
  "google-aiKeys": 365,
  "mistral-aiKeys": 3,
  "deepseekKeys": 376,
  "grok": {
    "usage": "2.89m tokens",
    "activeKeys": 5,
    "revokedKeys": 0,
    "overQuotaKeys": 0,
    "proomptersInQueue": 0,
    "estimatedQueueTime": "no wait"
  },
  "turbo": {
    "usage": "130.7k tokens",
    "activeKeys": 5,
    "revokedKeys": 1,
    "overQuotaKeys": 4,
    "trialKeys": 0,
    "proomptersInQueue": 0,
    "estimatedQueueTime": "no wait"
  },
  "gpt4-turbo": {
    "usage": "550.7k tokens",
    "activeKeys": 5,
    "overQuotaKeys": 4,
    "proomptersInQueue": 0,
    "estimatedQueueTime": "no wait"
  },
  "gpt4": {
    "usage": "21.9k tokens",
    "activeKeys": 5,
    "overQuotaKeys": 4,
    "proomptersInQueue": 0,
    "estimatedQueueTime": "no wait"
  },
  "chatgpt-4o-latest": {
    "usage": "96.71m tokens",
    "activeKeys": 4,
    "overQuotaKeys": 4,
    "proomptersInQueue": 0,
    "estimatedQueueTime": "no wait"
  },
  "gpt41-nano": {
    "usage": "4.8k tokens",
    "activeKeys": 5,
    "overQuotaKeys": 4,
    "proomptersInQueue": 0,
    "estimatedQueueTime": "no wait"
  },
  "gpt4o": {
    "usage": "9.00m tokens",
    "activeKeys": 5,
    "overQuotaKeys": 4,
    "proomptersInQueue": 0,
    "estimatedQueueTime": "no wait"
  },
  "gpt41-mini": {
    "usage": "0 tokens",
    "activeKeys": 5,
    "overQuotaKeys": 4,
    "proomptersInQueue": 0,
    "estimatedQueueTime": "no wait"
  },
  "o3-mini": {
    "usage": "32.3k tokens",
    "activeKeys": 4,
    "overQuotaKeys": 4,
    "proomptersInQueue": 0,
    "estimatedQueueTime": "no wait"
  },
  "gpt41": {
    "usage": "20.75m tokens",
    "activeKeys": 5,
    "overQuotaKeys": 4,
    "proomptersInQueue": 0,
    "estimatedQueueTime": "no wait"
  },
  "o4-mini": {
    "usage": "0 tokens",
    "activeKeys": 4,
    "overQuotaKeys": 4,
    "proomptersInQueue": 0,
    "estimatedQueueTime": "no wait"
  },
  "gemini-pro": {
    "usage": "135.24m tokens",
    "activeKeys": 362,
    "revokedKeys": 3,
    "proomptersInQueue": 0,
    "estimatedQueueTime": "no wait"
  },
  "gemini-flash": {
    "usage": "6.11m tokens",
    "activeKeys": 362,
    "revokedKeys": 3,
    "proomptersInQueue": 0,
    "estimatedQueueTime": "no wait"
  },
  "gemma-3": {
    "usage": "0 tokens",
    "activeKeys": 362,
    "revokedKeys": 3,
    "proomptersInQueue": 0,
    "estimatedQueueTime": "no wait"
  },
  "mistral-small": {
    "usage": "0 tokens",
    "activeKeys": 3,
    "revokedKeys": 0,
    "proomptersInQueue": 0,
    "estimatedQueueTime": "no wait"
  },
  "mistral-tiny": {
    "usage": "0 tokens",
    "activeKeys": 3,
    "revokedKeys": 0,
    "proomptersInQueue": 0,
    "estimatedQueueTime": "no wait"
  },
  "mistral-medium": {
    "usage": "0 tokens",
    "activeKeys": 3,
    "revokedKeys": 0,
    "proomptersInQueue": 0,
    "estimatedQueueTime": "no wait"
  },
  "mistral-large": {
    "usage": "153.2k tokens",
    "activeKeys": 3,
    "revokedKeys": 0,
    "proomptersInQueue": 0,
    "estimatedQueueTime": "no wait"
  },
  "deepseek": {
    "usage": "3.44m tokens",
    "activeKeys": 185,
    "revokedKeys": 56,
    "overQuotaKeys": 135,
    "proomptersInQueue": 0,
    "estimatedQueueTime": "no wait"
  },
  "config": {
    "page_body": "200",
    "gatekeeper": "user_token",
    "captchaMode": "none",
    "powTokenPrompt": "100",
    "powTokenMaxIps": "2",
    "powDifficultyLevel": "low",
    "powChallengeTimeout": "30",
    "textModelRateLimit": "4",
    "imageModelRateLimit": "4",
    "maxContextTokensOpenAI": "72000",
    "maxContextTokensAnthropic": "90000",
    "maxOutputTokensOpenAI": "12000",
    "maxOutputTokensAnthropic": "8192",
    "maxOutputTokensGoogle": "30000",
    "universalEndpoint": "true",
    "rejectMessage": "This content violates /aicg/'s acceptable use policy.",
    "hashIp": "false",
    "allowAwsLogging": "false",
    "promptLogging": "false",
    "tokenQuota": {
      "chatgpt-4o-latest": "0",
      "gpt41": "0",
      "gpt41-nano": "0",
      "gpt41-mini": "0",
      "turbo": "0",
      "gpt4": "0",
      "gpt4-32k": "0",
      "gpt4-turbo": "0",
      "gpt4o": "0",
      "gpt45": "0",
      "o1": "0",
      "o1-mini": "0",
      "o3": "0",
      "o3-mini": "0",
      "o4-mini": "0",
      "dall-e": "0",
      "claude": "0",
      "claude-opus": "0",
      "gemma-3": "0",
      "gemini-flash": "0",
      "gemini-pro": "0",
      "gemini-ultra": "0",
      "mistral-tiny": "0",
      "mistral-small": "0",
      "mistral-medium": "0",
      "mistral-large": "0",
      "aws-claude": "0",
      "aws-claude-opus": "0",
      "aws-mistral-tiny": "0",
      "aws-mistral-small": "0",
      "aws-mistral-medium": "0",
      "aws-mistral-large": "0",
      "gcp-claude": "0",
      "gcp-claude-opus": "0",
      "azure-turbo": "0",
      "azure-gpt4": "0",
      "azure-gpt4-32k": "0",
      "azure-gpt4-turbo": "0",
      "azure-gpt4o": "0",
      "azure-gpt45": "0",
      "azure-gpt41": "0",
      "azure-gpt41-nano": "0",
      "azure-gpt41-mini": "0",
      "azure-dall-e": "0",
      "azure-o1": "0",
      "azure-o1-mini": "0",
      "azure-o3": "0",
      "azure-o4-mini": "0",
      "azure-o3-mini": "0",
      "deepseek": "0",
      "grok": "0"
    },
    "allowOpenAIToolUsage": "true",
    "allowedVisionServices": "google-ai,openai,mistral-ai,xai",
    "tokensPunishmentFactor": "0"
  },
  "build": "9301530 (modified) (main@subzerowolf1/oai-reverse-proxy)"
}
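
In practical terms, the config block above means access is gated by a user token (gatekeeper: user_token), OpenAI requests are capped at 72,000 context tokens and 12,000 output tokens, the text model rate limit is 4, and requests that violate the acceptable use policy return the rejectMessage shown above. Below is a minimal sketch of connecting with the official openai Python package, assuming the OpenAI endpoint speaks the standard chat completions API and accepts the user token in place of an OpenAI API key; the exact base URL suffix (with or without /v1) and model id are assumptions, so match them to your own frontend.

from openai import OpenAI

# Assumed: the proxy's OpenAI endpoint is OpenAI-compatible and the user token
# is passed where the OpenAI API key would normally go.
client = OpenAI(
    base_url="https://wulfs-den.ink/proxy/openai",  # or the universal endpoint listed above
    api_key="YOUR_USER_TOKEN",
)

completion = client.chat.completions.create(
    model="gpt-4o",   # example model id; pick any family listed above
    max_tokens=512,   # stays well under maxOutputTokensOpenAI (12000)
    messages=[{"role": "user", "content": "Hello from The Wulf's Den."}],
)
print(completion.choices[0].message.content)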