Claude-code-plugins-plus-skills maintainx-performance-tuning

install
source · Clone the upstream repo
git clone https://github.com/jeremylongshore/claude-code-plugins-plus-skills
Claude Code · Install into ~/.claude/skills/
T=$(mktemp -d) && git clone --depth=1 https://github.com/jeremylongshore/claude-code-plugins-plus-skills "$T" && mkdir -p ~/.claude/skills && cp -r "$T/plugins/saas-packs/maintainx-pack/skills/maintainx-performance-tuning" ~/.claude/skills/jeremylongshore-claude-code-plugins-plus-skills-maintainx-performance-tuning && rm -rf "$T"
manifest: plugins/saas-packs/maintainx-pack/skills/maintainx-performance-tuning/SKILL.md
source content

MaintainX Performance Tuning

Overview

Optimize MaintainX integration performance with caching, connection pooling, efficient pagination, and request deduplication.

Prerequisites

  • MaintainX integration working
  • Node.js 18+
  • Redis (recommended for production caching)
  • Performance baseline measurements

Instructions

Step 1: Connection Pooling with Keep-Alive

// src/performance/pooled-client.ts
import axios from 'axios';
import http from 'node:http';
import https from 'node:https';

// Reuse TCP connections instead of opening new ones per request
const httpAgent = new http.Agent({ keepAlive: true, maxSockets: 10 });
const httpsAgent = new https.Agent({ keepAlive: true, maxSockets: 10 });

const client = axios.create({
  baseURL: 'https://api.getmaintainx.com/v1',
  headers: {
    Authorization: `Bearer ${process.env.MAINTAINX_API_KEY}`,
    'Content-Type': 'application/json',
  },
  httpAgent,
  httpsAgent,
  timeout: 30_000,
});

// Benefit: Eliminates TCP handshake + TLS negotiation per request
// Typical improvement: 100-200ms saved per request

Step 2: Multi-Level Caching

// src/performance/cache.ts

interface CacheLayer<T> {
  get(key: string): Promise<T | undefined>;
  set(key: string, value: T, ttlMs: number): Promise<void>;
}

// L1: In-memory (fastest, per-process)
class MemoryCache<T> implements CacheLayer<T> {
  private store = new Map<string, { value: T; expiresAt: number }>();

  async get(key: string) {
    const entry = this.store.get(key);
    if (entry && entry.expiresAt > Date.now()) return entry.value;
    this.store.delete(key);
    return undefined;
  }

  async set(key: string, value: T, ttlMs: number) {
    this.store.set(key, { value, expiresAt: Date.now() + ttlMs });
  }
}

// L2: Redis (shared across processes)
class RedisCache<T> implements CacheLayer<T> {
  constructor(private redis: any) {}

  async get(key: string) {
    const data = await this.redis.get(`mx:${key}`);
    return data ? JSON.parse(data) : undefined;
  }

  async set(key: string, value: T, ttlMs: number) {
    await this.redis.setex(`mx:${key}`, Math.ceil(ttlMs / 1000), JSON.stringify(value));
  }
}

// Multi-level cache: check L1 first, then L2, then fetch
class MultiCache<T> {
  constructor(private l1: CacheLayer<T>, private l2: CacheLayer<T>) {}

  async getOrFetch(key: string, ttlMs: number, fetcher: () => Promise<T>): Promise<T> {
    // Check L1
    let value = await this.l1.get(key);
    if (value !== undefined) return value;

    // Check L2
    value = await this.l2.get(key);
    if (value !== undefined) {
      await this.l1.set(key, value, ttlMs / 2); // L1 shorter TTL
      return value;
    }

    // Fetch from API
    value = await fetcher();
    await this.l1.set(key, value, ttlMs / 2);
    await this.l2.set(key, value, ttlMs);
    return value;
  }
}

Step 3: DataLoader for Batch Loading

When multiple parts of your app need the same work order, batch and deduplicate:

// src/performance/dataloader.ts
import DataLoader from 'dataloader';

const workOrderLoader = new DataLoader<number, any>(
  async (ids: readonly number[]) => {
    // Batch: fetch multiple work orders in parallel
    const results = await Promise.all(
      ids.map((id) =>
        client.get(`/workorders/${id}`).then((r) => r.data)
      ),
    );
    // Return in same order as input ids
    return ids.map((id) => results.find((r) => r.id === id) || null);
  },
  {
    maxBatchSize: 25,
    cacheKeyFn: (id) => String(id),
  },
);

// These 3 calls collapse into 1 batched operation:
const [wo1, wo2, wo3] = await Promise.all([
  workOrderLoader.load(100),
  workOrderLoader.load(200),
  workOrderLoader.load(100), // deduped, same as first
]);

Step 4: Efficient Pagination

// Fetch only the fields you need (if API supports field selection)
// Use larger page sizes to reduce round trips

async function efficientFetchAll(client: any, endpoint: string, key: string) {
  const all = [];
  let cursor: string | undefined;
  let pageCount = 0;

  const startTime = Date.now();

  do {
    const { data } = await client.get(endpoint, {
      params: { limit: 100, cursor }, // Max page size
    });
    all.push(...data[key]);
    cursor = data.cursor;
    pageCount++;
  } while (cursor);

  const elapsed = Date.now() - startTime;
  console.log(`Fetched ${all.length} items in ${pageCount} pages (${elapsed}ms)`);
  return all;
}

// Parallel pagination for independent resources
async function fetchAllResources(client: any) {
  const [workOrders, assets, locations] = await Promise.all([
    efficientFetchAll(client, '/workorders', 'workOrders'),
    efficientFetchAll(client, '/assets', 'assets'),
    efficientFetchAll(client, '/locations', 'locations'),
  ]);

  return { workOrders, assets, locations };
}

Step 5: Request Deduplication

// src/performance/dedup.ts

class RequestDeduplicator {
  private inflight = new Map<string, Promise<any>>();

  async dedupe<T>(key: string, fetcher: () => Promise<T>): Promise<T> {
    if (this.inflight.has(key)) {
      return this.inflight.get(key)! as Promise<T>;
    }

    const promise = fetcher().finally(() => {
      this.inflight.delete(key);
    });

    this.inflight.set(key, promise);
    return promise;
  }
}

const dedup = new RequestDeduplicator();

// 10 concurrent calls to getWorkOrder(123) = 1 actual API call
async function getWorkOrder(id: number) {
  return dedup.dedupe(`wo:${id}`, () => client.get(`/workorders/${id}`));
}

Performance Benchmarks

OptimizationBeforeAfterImprovement
Connection pooling350ms/req150ms/req57% faster
L1 cache (hot path)150ms/req< 1ms/req99% faster
DataLoader batching10 calls1 call90% fewer requests
Max page size (100)50 pages10 pages5x fewer round trips
Request dedupN calls1 call(N-1) saved

Output

  • Connection pooling with keep-alive (reuses TCP connections)
  • Multi-level cache (L1 in-memory + L2 Redis)
  • DataLoader for batching and deduplication of entity fetches
  • Efficient pagination with max page sizes
  • Request deduplication preventing redundant concurrent calls

Error Handling

IssueCauseSolution
Stale cache dataTTL too longReduce TTL, invalidate on writes
Memory growthUnbounded cacheSet max size, use LRU eviction
DataLoader errorsOne item in batch failsHandle per-item errors in batch function
Connection pool exhaustionToo many concurrent requestsIncrease
maxSockets
or add queue

Resources

Next Steps

For cost optimization, see

maintainx-cost-tuning
.

Examples

Benchmark your API response times:

# Measure latency for 10 sequential requests
for i in $(seq 1 10); do
  curl -s -o /dev/null -w "Request $i: %{time_total}s\n" \
    "https://api.getmaintainx.com/v1/workorders?limit=1" \
    -H "Authorization: Bearer $MAINTAINX_API_KEY"
done