The Wulf's Den

Gemma 3: no wait / Chatgpt-4o-Latest: no wait / GPT-4o Mini / 3.5 Turbo: no wait / GPT-4: no wait / GPT-4 32k: no wait / GPT-4 Turbo: no wait / GPT-4o: no wait / Gemini Flash: no wait / Gemini Pro: no wait / Mistral 7B: no wait / Mistral Nemo: no wait / Mistral Medium: no wait / Mistral Large: no wait / Deepseek: no wait / OpenAI o3 mini: no wait

Welcome to The Wulf's Den

Small disclaimer: streaming does not currently work well with Gemini, so if you experience any issues/errors, please disable streaming. Context also seems to be somewhat broken on Gemini, so with anything over 30k tokens of context you might receive errors; there is no fix for this as of yet. Streaming should be perfectly fine with everything else.


Service Info

{
  "uptime": 1864,
  "endpoints": {
    "openai": "https://wulfs-den.ink/proxy/openai",
    "google-ai": "https://wulfs-den.ink/proxy/google-ai",
    "mistral-ai": "https://wulfs-den.ink/proxy/mistral-ai",
    "deepseek": "https://wulfs-den.ink/proxy/deepseek",
    "universal": "https://wulfs-den.ink/proxy"
  },
  "proompts": 246,
  "tookens": "4.92m",
  "proomptsTotal": 1366197,
  "proomptersNow": 5,
  "tookensTotal": "24.689b",
  "openaiKeys": 49,
  "openaiOrgs": 33,
  "google-aiKeys": 344,
  "mistral-aiKeys": 3,
  "deepseekKeys": 106,
  "status": "Checking 2 keys...",
  "gpt4": {
    "usage": "0 tokens",
    "activeKeys": 41,
    "overQuotaKeys": 3,
    "proomptersInQueue": 0,
    "estimatedQueueTime": "no wait"
  },
  "gpt4-turbo": {
    "usage": "109.1k tokens",
    "activeKeys": 38,
    "overQuotaKeys": 3,
    "proomptersInQueue": 0,
    "estimatedQueueTime": "no wait"
  },
  "gpt4o": {
    "usage": "194.2k tokens",
    "activeKeys": 41,
    "overQuotaKeys": 3,
    "proomptersInQueue": 0,
    "estimatedQueueTime": "no wait"
  },
  "o3-mini": {
    "usage": "0 tokens",
    "activeKeys": 15,
    "overQuotaKeys": 3,
    "proomptersInQueue": 0,
    "estimatedQueueTime": "no wait"
  },
  "turbo": {
    "usage": "0 tokens",
    "activeKeys": 46,
    "revokedKeys": 0,
    "overQuotaKeys": 3,
    "trialKeys": 0,
    "proomptersInQueue": 0,
    "estimatedQueueTime": "no wait"
  },
  "chatgpt-4o-latest": {
    "usage": "3.18m tokens",
    "activeKeys": 38,
    "overQuotaKeys": 3,
    "proomptersInQueue": 0,
    "estimatedQueueTime": "no wait"
  },
  "gpt4-32k": {
    "usage": "0 tokens",
    "activeKeys": 1,
    "overQuotaKeys": 0,
    "proomptersInQueue": 0,
    "estimatedQueueTime": "no wait"
  },
  "gemini-pro": {
    "usage": "426.6k tokens",
    "activeKeys": 344,
    "revokedKeys": 0,
    "proomptersInQueue": 0,
    "estimatedQueueTime": "no wait"
  },
  "gemini-flash": {
    "usage": "777.3k tokens",
    "activeKeys": 344,
    "revokedKeys": 0,
    "proomptersInQueue": 0,
    "estimatedQueueTime": "no wait"
  },
  "gemma-3": {
    "usage": "0 tokens",
    "activeKeys": 344,
    "revokedKeys": 0,
    "proomptersInQueue": 0,
    "estimatedQueueTime": "no wait"
  },
  "mistral-small": {
    "usage": "0 tokens",
    "activeKeys": 3,
    "revokedKeys": 0,
    "proomptersInQueue": 0,
    "estimatedQueueTime": "no wait"
  },
  "mistral-tiny": {
    "usage": "0 tokens",
    "activeKeys": 3,
    "revokedKeys": 0,
    "proomptersInQueue": 0,
    "estimatedQueueTime": "no wait"
  },
  "mistral-medium": {
    "usage": "0 tokens",
    "activeKeys": 3,
    "revokedKeys": 0,
    "proomptersInQueue": 0,
    "estimatedQueueTime": "no wait"
  },
  "mistral-large": {
    "usage": "39.7k tokens",
    "activeKeys": 3,
    "revokedKeys": 0,
    "proomptersInQueue": 0,
    "estimatedQueueTime": "no wait"
  },
  "deepseek": {
    "usage": "192.5k tokens",
    "activeKeys": 106,
    "revokedKeys": 0,
    "overQuotaKeys": 0,
    "proomptersInQueue": 0,
    "estimatedQueueTime": "no wait"
  },
  "config": {
    "page_body": "200",
    "gatekeeper": "user_token",
    "captchaMode": "none",
    "powTokenPrompt": "100",
    "powTokenMaxIps": "2",
    "powDifficultyLevel": "low",
    "powChallengeTimeout": "30",
    "textModelRateLimit": "4",
    "imageModelRateLimit": "4",
    "maxContextTokensOpenAI": "90000",
    "maxContextTokensAnthropic": "90000",
    "maxOutputTokensOpenAI": "8192",
    "maxOutputTokensAnthropic": "8192",
    "universalEndpoint": "true",
    "rejectMessage": "This content violates /aicg/'s acceptable use policy.",
    "hashIp": "false",
    "allowAwsLogging": "false",
    "promptLogging": "false",
    "tokenQuota": {
      "chatgpt-4o-latest": "0",
      "turbo": "0",
      "gpt4": "0",
      "gpt4-32k": "0",
      "gpt4-turbo": "0",
      "gpt4o": "0",
      "gpt45": "0",
      "o1": "0",
      "o1-mini": "0",
      "o3-mini": "0",
      "dall-e": "0",
      "claude": "0",
      "claude-opus": "0",
      "gemma-3": "0",
      "gemini-flash": "0",
      "gemini-pro": "0",
      "gemini-ultra": "0",
      "mistral-tiny": "0",
      "mistral-small": "0",
      "mistral-medium": "0",
      "mistral-large": "0",
      "aws-claude": "0",
      "aws-claude-opus": "0",
      "aws-mistral-tiny": "0",
      "aws-mistral-small": "0",
      "aws-mistral-medium": "0",
      "aws-mistral-large": "0",
      "gcp-claude": "0",
      "gcp-claude-opus": "0",
      "azure-turbo": "0",
      "azure-gpt4": "0",
      "azure-gpt4-32k": "0",
      "azure-gpt4-turbo": "0",
      "azure-gpt4o": "0",
      "azure-gpt45": "0",
      "azure-dall-e": "0",
      "azure-o1": "0",
      "azure-o1-mini": "0",
      "azure-o3-mini": "0",
      "deepseek": "0"
    },
    "allowOpenAIToolUsage": "true",
    "allowedVisionServices": "google-ai,openai,mistral-ai",
    "tokensPunishmentFactor": "0"
  },
  "build": "696f7b7 (main@subzerowolf1/oai-reverse-proxy)"
}