All files / src/app/api/admin/cache/stats route.ts

0% Statements 0/78
100% Branches 0/0
0% Functions 0/1
0% Lines 0/78

Press n or j to go to the next uncovered block, b, p or k for the previous block.

1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79                                                                                                                                                             
/**
 * Cache Stats API
 *
 * GET /api/admin/cache/stats - Get cache statistics (Admin only)
 *
 * Reports stats from both caching systems:
 * - Core cache (Redis/memory hybrid) - used by API routes
 * - Node-cache (legacy) - used by some cached queries
 */

import { NextResponse } from 'next/server';
import { withErrorHandling, withAdmin } from '@/lib/api';
import {
  getCacheStats as getNodeCacheStats,
  getCachedKeys as getNodeCacheKeys,
  CACHE_VERSION,
} from '@/lib/cache';
import { getCacheStats as getCoreCacheStats } from '@/lib/core';

/**
 * Handle GET /api/admin/cache/stats.
 *
 * Aggregates hit/miss counters and key listings from the two caching
 * systems (node-cache via lib/cache, and the core Redis/memory hybrid
 * cache via lib/core) into a single combined stats payload.
 *
 * @returns JSON response containing combined hit/miss stats, per-prefix
 *          key counts, a sample of up to 100 keys, per-source debug
 *          counters, and a timestamp.
 */
async function handleGet(): Promise<NextResponse> {
  // Stats from node-cache (lib/cache).
  const nodeCacheStats = getNodeCacheStats();
  const nodeCacheKeys = getNodeCacheKeys();

  // Stats from core cache (lib/core/cache - the one actually used by APIs).
  const coreCacheStats = getCoreCacheStats();

  // Deduplicate keys across both caches (the same key may exist in both).
  const allKeys = [...new Set([...nodeCacheKeys, ...coreCacheStats.keys])];

  // Group keys by prefix (text before the first ':') for easier understanding.
  // `split` always yields at least one element, so the `?? key` fallback only
  // satisfies noUncheckedIndexedAccess; it never changes runtime behavior.
  const keysByPrefix: Record<string, number> = {};
  for (const key of allKeys) {
    const prefix = key.split(':')[0] ?? key;
    // `??` (not `||`): counts are numbers, so nullish coalescing is the
    // correct default idiom even though 0 never occurs here.
    keysByPrefix[prefix] = (keysByPrefix[prefix] ?? 0) + 1;
  }

  // Combined counters from both caches.
  const totalKeys = allKeys.length;
  const hits = nodeCacheStats.hits + coreCacheStats.hits;
  const misses = nodeCacheStats.misses + coreCacheStats.misses;
  const total = hits + misses;

  // Rough key-memory estimate for the core cache: 2 bytes per UTF-16 code
  // unit of each key. Values are not accounted for — this is a lower bound.
  const estimatedCoreMemory = coreCacheStats.keys.reduce(
    (acc, key) => acc + key.length * 2,
    0,
  );

  return NextResponse.json({
    success: true,
    data: {
      version: CACHE_VERSION,
      stats: {
        hits,
        misses,
        keys: totalKeys,
        // ksize mixes node-cache's reported key size with our core estimate.
        ksize: nodeCacheStats.ksize + estimatedCoreMemory,
        // vsize is node-cache only; core cache value sizes are not tracked.
        vsize: nodeCacheStats.vsize,
        hitRate: total > 0 ? hits / total : 0,
        hitRatePercent: total > 0 ? ((hits / total) * 100).toFixed(2) + '%' : '0.00%',
      },
      totalKeys,
      keysByPrefix,
      // Only include first 100 keys to avoid large responses
      sampleKeys: allKeys.slice(0, 100),
      // Debug info about cache sources
      sources: {
        nodeCacheKeys: nodeCacheKeys.length,
        nodeCacheHits: nodeCacheStats.hits,
        nodeCacheMisses: nodeCacheStats.misses,
        coreCacheKeys: coreCacheStats.size,
        coreCacheHits: coreCacheStats.hits,
        coreCacheMisses: coreCacheStats.misses,
      },
      timestamp: new Date().toISOString(),
    },
  });
}

export const GET = withErrorHandling(withAdmin(handleGet));