diff --git a/e2e/settings.spec.ts b/e2e/settings.spec.ts
index af6c247..fa3c6b9 100644
--- a/e2e/settings.spec.ts
+++ b/e2e/settings.spec.ts
@@ -16,17 +16,19 @@ async function setupAuth(page: Page) {
},
})
);
- await page.route("https://api.github.com/search/issues*", (route) =>
- route.fulfill({
- status: 200,
- json: { total_count: 0, incomplete_results: false, items: [] },
- })
- );
await page.route("https://api.github.com/notifications*", (route) =>
route.fulfill({ status: 200, json: [] })
);
await page.route("https://api.github.com/graphql", (route) =>
- route.fulfill({ status: 200, json: { data: {} } })
+ route.fulfill({
+ status: 200,
+ json: {
+ data: {
+ search: { issueCount: 0, pageInfo: { hasNextPage: false, endCursor: null }, nodes: [] },
+ rateLimit: { remaining: 5000, resetAt: new Date(Date.now() + 3600000).toISOString() },
+ },
+ },
+ })
);
await page.addInitScript(() => {
diff --git a/e2e/smoke.spec.ts b/e2e/smoke.spec.ts
index 65369c0..fc2cc64 100644
--- a/e2e/smoke.spec.ts
+++ b/e2e/smoke.spec.ts
@@ -17,12 +17,6 @@ async function setupAuth(page: Page) {
},
})
);
- await page.route("https://api.github.com/search/issues*", (route) =>
- route.fulfill({
- status: 200,
- json: { total_count: 0, incomplete_results: false, items: [] },
- })
- );
await page.route(
"https://api.github.com/repos/*/actions/runs*",
(route) =>
@@ -35,7 +29,15 @@ async function setupAuth(page: Page) {
route.fulfill({ status: 200, json: [] })
);
await page.route("https://api.github.com/graphql", (route) =>
- route.fulfill({ status: 200, json: { data: {} } })
+ route.fulfill({
+ status: 200,
+ json: {
+ data: {
+ search: { issueCount: 0, pageInfo: { hasNextPage: false, endCursor: null }, nodes: [] },
+ rateLimit: { remaining: 5000, resetAt: new Date(Date.now() + 3600000).toISOString() },
+ },
+ },
+ })
);
// Seed localStorage with auth token and config before the page loads
@@ -104,10 +106,15 @@ test("OAuth callback flow completes and redirects", async ({ page }) => {
);
});
// Also intercept downstream dashboard API calls
- await page.route("https://api.github.com/search/issues*", (route) =>
+ await page.route("https://api.github.com/graphql", (route) =>
route.fulfill({
status: 200,
- json: { total_count: 0, incomplete_results: false, items: [] },
+ json: {
+ data: {
+ search: { issueCount: 0, pageInfo: { hasNextPage: false, endCursor: null }, nodes: [] },
+ rateLimit: { remaining: 5000, resetAt: new Date(Date.now() + 3600000).toISOString() },
+ },
+ },
})
);
await page.route("https://api.github.com/notifications*", (route) =>
diff --git a/src/app/components/dashboard/DashboardPage.tsx b/src/app/components/dashboard/DashboardPage.tsx
index 7b148e5..74e9522 100644
--- a/src/app/components/dashboard/DashboardPage.tsx
+++ b/src/app/components/dashboard/DashboardPage.tsx
@@ -14,7 +14,7 @@ import { clearAuth, user, onAuthCleared, DASHBOARD_STORAGE_KEY } from "../../sto
// ── Shared dashboard store (module-level to survive navigation) ─────────────
-const CACHE_VERSION = 1;
+const CACHE_VERSION = 2;
interface DashboardStore {
issues: Issue[];
diff --git a/src/app/components/dashboard/PullRequestsTab.tsx b/src/app/components/dashboard/PullRequestsTab.tsx
index d77d095..c67cf01 100644
--- a/src/app/components/dashboard/PullRequestsTab.tsx
+++ b/src/app/components/dashboard/PullRequestsTab.tsx
@@ -349,7 +349,7 @@ export default function PullRequestsTab(props: PullRequestsTabProps) {
createdAt={pr.createdAt}
url={pr.htmlUrl}
labels={pr.labels}
- commentCount={pr.comments + pr.reviewComments}
+ commentCount={pr.comments + pr.reviewThreads}
onIgnore={() => handleIgnore(pr)}
density={config.viewDensity}
>
diff --git a/src/app/components/layout/Header.tsx b/src/app/components/layout/Header.tsx
index e8ca9e8..21b8c2e 100644
--- a/src/app/components/layout/Header.tsx
+++ b/src/app/components/layout/Header.tsx
@@ -1,7 +1,7 @@
import { createSignal, Show } from "solid-js";
import { useNavigate } from "@solidjs/router";
import { user, clearAuth } from "../../stores/auth";
-import { getCoreRateLimit, getSearchRateLimit } from "../../services/github";
+import { getCoreRateLimit, getGraphqlRateLimit } from "../../services/github";
import { getUnreadCount, markAllAsRead } from "../../lib/errors";
import NotificationDrawer from "../shared/NotificationDrawer";
import ToastContainer from "../shared/ToastContainer";
@@ -27,7 +27,7 @@ export default function Header() {
const unreadCount = () => getUnreadCount();
const coreRL = () => getCoreRateLimit();
- const searchRL = () => getSearchRateLimit();
+ const graphqlRL = () => getGraphqlRateLimit();
function formatLimit(remaining: number, limit: number, unit: string): string {
const k = limit >= 1000 ? `${limit / 1000}k` : String(limit);
@@ -43,7 +43,7 @@ export default function Header() {
-
+
Rate Limits
@@ -57,13 +57,13 @@ export default function Header() {
)}
-
+
{(rl) => (
- {formatLimit(rl().remaining, 30, "min")}
+ GraphQL {formatLimit(rl().remaining, 5000, "hr")}
)}
diff --git a/src/app/services/api.ts b/src/app/services/api.ts
index e01404a..6393037 100644
--- a/src/app/services/api.ts
+++ b/src/app/services/api.ts
@@ -1,5 +1,4 @@
-import { getClient, cachedRequest, updateRateLimitFromHeaders } from "./github";
-import { evictByPrefix } from "../stores/cache";
+import { getClient, cachedRequest, updateGraphqlRateLimit } from "./github";
import { pushNotification } from "../lib/errors";
// ── Types ────────────────────────────────────────────────────────────────────
@@ -62,7 +61,7 @@ export interface PullRequest {
deletions: number;
changedFiles: number;
comments: number;
- reviewComments: number;
+ reviewThreads: number;
labels: { name: string; color: string }[];
reviewDecision: "APPROVED" | "CHANGES_REQUESTED" | "REVIEW_REQUIRED" | null;
totalReviewCount: number;
@@ -117,28 +116,6 @@ interface RawRepo {
pushed_at: string | null;
}
-interface RawPullRequest {
- id: number;
- number: number;
- title: string;
- state: string;
- draft: boolean;
- html_url: string;
- created_at: string;
- updated_at: string;
- user: { login: string; avatar_url: string } | null;
- head: { sha: string; ref: string; repo: { full_name: string } | null };
- base: { ref: string };
- assignees: { login: string }[];
- requested_reviewers: { login: string }[];
- additions: number;
- deletions: number;
- changed_files: number;
- comments: number;
- review_comments: number;
- labels: { name: string; color: string }[];
-}
-
interface RawWorkflowRun {
id: number;
name: string;
@@ -159,49 +136,14 @@ interface RawWorkflowRun {
actor: { login: string } | null;
}
-// ── Search API types ─────────────────────────────────────────────────────────
-
-interface RawSearchResponse {
- total_count: number;
- incomplete_results: boolean;
- items: RawSearchItem[];
-}
-
-interface RawSearchItem {
- id: number;
- number: number;
- title: string;
- state: string;
- html_url: string;
- created_at: string;
- updated_at: string;
- user: { login: string; avatar_url: string } | null;
- labels: { name: string; color: string }[];
- assignees: { login: string }[];
- // Search API returns repository_url (string), NOT repository (object).
- // We parse full_name from the URL in getRepoFullName().
- repository_url?: string;
- pull_request?: unknown;
- comments: number;
-}
-
-/** Extract "owner/repo" from "https://api.github.com/repos/owner/repo" */
-function getRepoFullName(item: RawSearchItem): string | null {
- const url = item.repository_url;
- if (!url) return null;
- const match = url.match(/\/repos\/([^/]+\/[^/]+)$/);
- return match ? match[1] : null;
-}
-
// ── Constants ────────────────────────────────────────────────────────────────
// Batch repos into chunks for search queries (keeps URL length manageable)
const SEARCH_REPO_BATCH_SIZE = 30;
-// Max PRs per GraphQL batch. Each alias fetches statusCheckRollup + pullRequest
-// (reviewDecision + latestReviews(first:15)). Cost: ~16 nodes/alias = ~800 pts/batch.
-// At 6 polls/hr (10min interval): ~4800 pts/hr against 5000/hr GraphQL budget.
-// Do not increase batch size or latestReviews.first without recalculating.
+// Max fork PRs per GraphQL batch for the statusCheckRollup fallback query.
+// Each alias looks up a single commit in the fork repo. Kept conservatively small
+// to avoid hitting query complexity limits when many fork PRs need fallback.
const GRAPHQL_CHECK_BATCH_SIZE = 50;
// Repos confirmed to have zero workflow runs — skipped on subsequent polls.
@@ -230,6 +172,27 @@ function extractRejectionError(reason: unknown): { statusCode: number | null; me
return { statusCode, message };
}
+/**
+ * Extracts partial data from a GraphqlResponseError for search queries.
+ * Only matches responses containing a `search` key (issues/PRs search shape).
+ */
+function extractSearchPartialData<T>(err: unknown): T | null {
+ if (
+ err &&
+ typeof err === "object" &&
+ "data" in err &&
+ err.data &&
+ typeof err.data === "object" &&
+ "search" in err.data
+ ) {
+ return err.data as T;
+ }
+ return null;
+}
+
+const VALID_REPO_NAME = /^[A-Za-z0-9._-]+\/[A-Za-z0-9._-]+$/;
+const VALID_LOGIN = /^[A-Za-z0-9\[\]-]+$/;
+
function chunkArray<T>(arr: T[], size: number): T[][] {
const chunks: T[][] = [];
for (let i = 0; i < arr.length; i += size) {
@@ -238,108 +201,533 @@ function chunkArray(arr: T[], size: number): T[][] {
return chunks;
}
+type GitHubOctokit = NonNullable<ReturnType<typeof getClient>>;
+
+// ── GraphQL search types ─────────────────────────────────────────────────────
+
+interface GraphQLIssueNode {
+ databaseId: number;
+ number: number;
+ title: string;
+ state: string;
+ url: string;
+ createdAt: string;
+ updatedAt: string;
+ author: { login: string; avatarUrl: string } | null;
+ labels: { nodes: { name: string; color: string }[] };
+ assignees: { nodes: { login: string }[] };
+ repository: { nameWithOwner: string } | null;
+ comments: { totalCount: number };
+}
+
+interface GraphQLIssueSearchResponse {
+ search: {
+ issueCount: number;
+ pageInfo: { hasNextPage: boolean; endCursor: string | null };
+ nodes: (GraphQLIssueNode | null)[];
+ };
+ rateLimit?: { remaining: number; resetAt: string };
+}
+
+interface GraphQLPRNode {
+ databaseId: number;
+ number: number;
+ title: string;
+ state: string;
+ isDraft: boolean;
+ url: string;
+ createdAt: string;
+ updatedAt: string;
+ author: { login: string; avatarUrl: string } | null;
+ headRefOid: string;
+ headRefName: string;
+ baseRefName: string;
+ headRepository: { owner: { login: string }; nameWithOwner: string } | null;
+ repository: { nameWithOwner: string } | null;
+ assignees: { nodes: { login: string }[] };
+ reviewRequests: { nodes: { requestedReviewer: { login: string } | null }[] };
+ labels: { nodes: { name: string; color: string }[] };
+ additions: number;
+ deletions: number;
+ changedFiles: number;
+ comments: { totalCount: number };
+ reviewThreads: { totalCount: number };
+ reviewDecision: string | null;
+ latestReviews: {
+ totalCount: number;
+ nodes: { author: { login: string } | null }[];
+ };
+ commits: {
+ nodes: {
+ commit: {
+ statusCheckRollup: { state: string } | null;
+ };
+ }[];
+ };
+}
+
+interface GraphQLPRSearchResponse {
+ search: {
+ issueCount: number;
+ pageInfo: { hasNextPage: boolean; endCursor: string | null };
+ nodes: (GraphQLPRNode | null)[];
+ };
+ rateLimit?: { remaining: number; resetAt: string };
+}
+
+interface ForkCandidate {
+ databaseId: number;
+ headOwner: string;
+ headRepo: string;
+ sha: string;
+}
+
+interface ForkRepoResult {
+ object: { statusCheckRollup: { state: string } | null } | null;
+}
+
+interface ForkQueryResponse {
+ rateLimit?: { remaining: number; resetAt: string };
+ [key: string]: ForkRepoResult | { remaining: number; resetAt: string } | undefined | null;
+}
+
+// ── GraphQL search query constants ───────────────────────────────────────────
+
+const ISSUES_SEARCH_QUERY = `
+ query($q: String!, $cursor: String) {
+ search(query: $q, type: ISSUE, first: 100, after: $cursor) {
+ issueCount
+ pageInfo { hasNextPage endCursor }
+ nodes {
+ ... on Issue {
+ databaseId
+ number
+ title
+ state
+ url
+ createdAt
+ updatedAt
+ author { login avatarUrl }
+ labels(first: 20) { nodes { name color } }
+ assignees(first: 20) { nodes { login } }
+ repository { nameWithOwner }
+ comments { totalCount }
+ }
+ }
+ }
+ rateLimit { remaining resetAt }
+ }
+`;
+
+const PR_SEARCH_QUERY = `
+ query($q: String!, $cursor: String) {
+ # GitHub search API uses type: ISSUE for both issues and PRs
+ search(query: $q, type: ISSUE, first: 100, after: $cursor) {
+ issueCount
+ pageInfo { hasNextPage endCursor }
+ nodes {
+ ... on PullRequest {
+ databaseId
+ number
+ title
+ state
+ isDraft
+ url
+ createdAt
+ updatedAt
+ author { login avatarUrl }
+ headRefOid
+ headRefName
+ baseRefName
+ headRepository { owner { login } nameWithOwner }
+ repository { nameWithOwner }
+ assignees(first: 20) { nodes { login } }
+ reviewRequests(first: 20) {
+ # Team reviewers are excluded (only User fragment matched)
+ nodes { requestedReviewer { ... on User { login } } }
+ }
+ labels(first: 20) { nodes { name color } }
+ additions
+ deletions
+ changedFiles
+ comments { totalCount }
+ reviewThreads { totalCount }
+ reviewDecision
+ latestReviews(first: 15) {
+ totalCount
+ nodes { author { login } }
+ }
+ commits(last: 1) {
+ nodes {
+ commit {
+ statusCheckRollup { state }
+ }
+ }
+ }
+ }
+ }
+ }
+ rateLimit { remaining resetAt }
+ }
+`;
+
+// ── GraphQL search functions ──────────────────────────────────────────────────
+
+interface SearchPageResult<T> {
+ issueCount: number;
+ pageInfo: { hasNextPage: boolean; endCursor: string | null };
+ nodes: (T | null)[];
+}
+
/**
- * Paginated search. Returns up to 1000 items per query.
- * Search API has its own rate limit: 30 req/min (separate from core 5000/hr).
- * Does NOT use IDB caching — search results are volatile and the poll interval
- * already gates how often we call.
+ * Paginates a single GraphQL search query string, collecting results via a
+ * caller-provided `processNode` callback. Handles partial errors, cap enforcement,
+ * and rate limit tracking. Returns the count of items added by processNode.
*/
-async function searchAllPages(
- octokit: NonNullable<ReturnType<typeof getClient>>,
- query: string
-): Promise<RawSearchItem[]> {
- const items: RawSearchItem[] = [];
- let page = 1;
- const perPage = 100;
+async function paginateGraphQLSearch<TResponse extends { search: SearchPageResult<TNode>; rateLimit?: { remaining: number; resetAt: string } }, TNode>(
+ octokit: GitHubOctokit,
+ query: string,
+ queryString: string,
+ batchLabel: string,
+ errors: ApiError[],
+ processNode: (node: TNode) => boolean, // returns true if node was added (for cap counting)
+ currentCount: () => number,
+ cap: number,
+): Promise<{ capReached: boolean }> {
+ let cursor: string | null = null;
+ let capReached = false;
while (true) {
- const response = await octokit.request("GET /search/issues", {
- q: query,
- per_page: perPage,
- page,
- sort: "updated",
- order: "desc",
- });
+ try {
+ let response: TResponse;
+ let isPartial = false;
+ try {
+ response = await octokit.graphql<TResponse>(query, { q: queryString, cursor });
+ } catch (err) {
+ const partial = extractSearchPartialData<TResponse>(err);
+ if (partial) {
+ response = partial;
+ isPartial = true;
+ const { message } = extractRejectionError(err);
+ errors.push({ repo: batchLabel, statusCode: null, message, retryable: true });
+ } else {
+ const { statusCode, message } = extractRejectionError(err);
+ errors.push({
+ repo: batchLabel,
+ statusCode,
+ message,
+ retryable: statusCode === null || statusCode >= 500,
+ });
+ break;
+ }
+ }
- updateRateLimitFromHeaders(response.headers as Record<string, string>, "search");
- const data = response.data as unknown as RawSearchResponse;
- items.push(...data.items);
-
- if (
- items.length >= data.total_count ||
- items.length >= 1000 ||
- data.items.length < perPage
- ) {
- if (data.incomplete_results) {
- console.warn(
- `[api] Search results incomplete for: ${query.slice(0, 80)}…`
- );
- pushNotification("search", "Search results may be incomplete — GitHub returned partial data", "warning");
+ if (response.rateLimit) updateGraphqlRateLimit(response.rateLimit);
+
+ for (const node of response.search.nodes) {
+ if (currentCount() >= cap) {
+ capReached = true;
+ break;
+ }
+ if (!node) continue;
+ processNode(node);
}
- if (items.length >= 1000 && data.total_count > 1000) {
- console.warn(
- `[api] Search results capped at 1000 (${data.total_count} total) for: ${query.slice(0, 80)}…`
- );
- pushNotification("search", `Search results capped at 1,000 of ${data.total_count.toLocaleString()} total — some items are hidden`, "warning");
+
+ if (capReached) {
+ return { capReached: true };
}
+
+ if (isPartial) break;
+
+ if (currentCount() >= cap) {
+ return { capReached: true };
+ }
+
+ if (!response.search.pageInfo.hasNextPage || !response.search.pageInfo.endCursor) break;
+ cursor = response.search.pageInfo.endCursor;
+ } catch (err) {
+ const { message } = extractRejectionError(err);
+ errors.push({ repo: batchLabel, statusCode: null, message, retryable: false });
break;
}
- page++;
}
- return items;
+ return { capReached };
}
-/**
- * Runs a search query across batched repo qualifiers, deduplicating results.
- * Splits repos into chunks of SEARCH_REPO_BATCH_SIZE to keep query length safe.
- */
-interface BatchSearchResult {
- items: RawSearchItem[];
- errors: ApiError[];
+function buildRepoQualifiers(repos: RepoRef[]): string {
+ return repos
+ .filter((r) => VALID_REPO_NAME.test(r.fullName))
+ .map((r) => `repo:${r.fullName}`)
+ .join(" ");
}
-async function batchedSearch(
- octokit: NonNullable<ReturnType<typeof getClient>>,
- baseQuery: string,
- repos: RepoRef[]
-): Promise {
- if (repos.length === 0) return { items: [], errors: [] };
+/**
+ * Fetches open issues via GraphQL search, using cursor-based pagination.
+ * Batches repos into chunks of SEARCH_REPO_BATCH_SIZE and runs chunks in parallel.
+ */
+async function graphqlSearchIssues(
+ octokit: GitHubOctokit,
+ repos: RepoRef[],
+ userLogin: string
+): Promise<FetchIssuesResult> {
+ if (!VALID_LOGIN.test(userLogin)) return { issues: [], errors: [{ repo: "search", statusCode: null, message: "Invalid userLogin", retryable: false }] };
- // Run search batches sequentially to avoid exceeding the 30 req/min search rate limit.
- // With multiple search types (issues, PR involves, PR review-requested) running concurrently,
- // parallel batches can quickly exhaust the shared search budget.
const chunks = chunkArray(repos, SEARCH_REPO_BATCH_SIZE);
- const results: PromiseSettledResult<RawSearchItem[]>[] = [];
- for (const chunk of chunks) {
- const repoQualifiers = chunk.map((r) => `repo:${r.fullName}`).join(" ");
- const result = await Promise.allSettled([searchAllPages(octokit, `${baseQuery} ${repoQualifiers}`)]);
- results.push(result[0]);
- }
const seen = new Set<number>();
- const items: RawSearchItem[] = [];
+ const issues: Issue[] = [];
const errors: ApiError[] = [];
+ const CAP = 1000;
+
+ const chunkResults = await Promise.allSettled(chunks.map(async (chunk, chunkIdx) => {
+ const repoQualifiers = buildRepoQualifiers(chunk);
+ const queryString = `is:issue is:open involves:${userLogin} ${repoQualifiers}`;
+
+ await paginateGraphQLSearch<GraphQLIssueSearchResponse, GraphQLIssueNode>(
+ octokit, ISSUES_SEARCH_QUERY, queryString,
+ `search-batch-${chunkIdx + 1}/${chunks.length}`,
+ errors,
+ (node) => {
+ if (node.databaseId == null || !node.repository) return false;
+ if (seen.has(node.databaseId)) return false;
+ seen.add(node.databaseId);
+ issues.push({
+ id: node.databaseId,
+ number: node.number,
+ title: node.title,
+ state: node.state,
+ htmlUrl: node.url,
+ createdAt: node.createdAt,
+ updatedAt: node.updatedAt,
+ userLogin: node.author?.login ?? "",
+ userAvatarUrl: node.author?.avatarUrl ?? "",
+ labels: node.labels.nodes.map((l) => ({ name: l.name, color: l.color })),
+ assigneeLogins: node.assignees.nodes.map((a) => a.login),
+ repoFullName: node.repository.nameWithOwner,
+ comments: node.comments.totalCount,
+ });
+ return true;
+ },
+ () => issues.length,
+ CAP,
+ );
+ }));
- for (let i = 0; i < results.length; i++) {
- const result = results[i];
- if (result.status !== "fulfilled") {
+ for (const result of chunkResults) {
+ if (result.status === "rejected") {
const { statusCode, message } = extractRejectionError(result.reason);
- errors.push({
- repo: `search-batch-${i + 1}/${chunks.length}`,
- statusCode,
- message,
- retryable: statusCode === null || statusCode >= 500,
- });
- continue;
+ errors.push({ repo: "search-batch", statusCode, message, retryable: statusCode === null || statusCode >= 500 });
}
- for (const item of result.value) {
- if (seen.has(item.id)) continue;
- seen.add(item.id);
- items.push(item);
+ }
+
+ if (issues.length >= CAP) {
+ console.warn(`[api] Issue search results capped at ${CAP}`);
+ pushNotification("search/issues", `Issue search results capped at 1,000 — some items are hidden`, "warning");
+ issues.splice(CAP);
+ }
+
+ return { issues, errors };
+}
+
+/**
+ * Maps a GraphQL statusCheckRollup state string to the app's CheckStatus type.
+ */
+function mapCheckStatus(state: string | null | undefined): CheckStatus["status"] {
+ if (state === "FAILURE" || state === "ERROR" || state === "ACTION_REQUIRED") return "failure";
+ if (state === "PENDING" || state === "EXPECTED" || state === "QUEUED") return "pending";
+ if (state === "SUCCESS") return "success";
+ return null;
+}
+
+/**
+ * Maps a GraphQL reviewDecision string to the typed union or null.
+ */
+function mapReviewDecision(
+ raw: string | null | undefined
+): "APPROVED" | "CHANGES_REQUESTED" | "REVIEW_REQUIRED" | null {
+ if (
+ raw === "APPROVED" ||
+ raw === "CHANGES_REQUESTED" ||
+ raw === "REVIEW_REQUIRED"
+ ) {
+ return raw;
+ }
+ return null;
+}
+
+/**
+ * Fetches open PRs via GraphQL search with two queries (involves + review-requested),
+ * deduplicates by databaseId, and handles fork PR statusCheckRollup fallback.
+ * Chunks run in parallel; fork fallback batches run in parallel.
+ */
+async function graphqlSearchPRs(
+ octokit: GitHubOctokit,
+ repos: RepoRef[],
+ userLogin: string
+): Promise<{ pullRequests: PullRequest[]; errors: ApiError[] }> {
+ if (!VALID_LOGIN.test(userLogin)) return { pullRequests: [], errors: [{ repo: "pr-search", statusCode: null, message: "Invalid userLogin", retryable: false }] };
+
+ const chunks = chunkArray(repos, SEARCH_REPO_BATCH_SIZE);
+ const prMap = new Map<number, PullRequest>();
+ const forkInfoMap = new Map<number, { owner: string; repoName: string }>();
+ const errors: ApiError[] = [];
+ const CAP = 1000;
+
+ function processPRNode(node: GraphQLPRNode): boolean {
+ if (node.databaseId == null || !node.repository) return false;
+ if (prMap.has(node.databaseId)) return false;
+
+ const pendingLogins = node.reviewRequests.nodes
+ .map((n) => n.requestedReviewer?.login)
+ .filter((l): l is string => l != null);
+ const actualLogins = node.latestReviews.nodes
+ .map((n) => n.author?.login)
+ .filter((l): l is string => l != null);
+ const reviewerLogins = [...new Set([...pendingLogins, ...actualLogins].map(l => l.toLowerCase()))];
+
+ const rawState = node.commits.nodes[0]?.commit?.statusCheckRollup?.state ?? null;
+
+ // Store fork info for fallback detection
+ if (node.headRepository) {
+ const parts = node.headRepository.nameWithOwner.split("/");
+ if (parts.length === 2) {
+ forkInfoMap.set(node.databaseId, { owner: node.headRepository.owner.login, repoName: parts[1] });
+ }
+ }
+
+ prMap.set(node.databaseId, {
+ id: node.databaseId,
+ number: node.number,
+ title: node.title,
+ state: node.state,
+ draft: node.isDraft,
+ htmlUrl: node.url,
+ createdAt: node.createdAt,
+ updatedAt: node.updatedAt,
+ userLogin: node.author?.login ?? "",
+ userAvatarUrl: node.author?.avatarUrl ?? "",
+ headSha: node.headRefOid,
+ headRef: node.headRefName,
+ baseRef: node.baseRefName,
+ assigneeLogins: node.assignees.nodes.map((a) => a.login),
+ reviewerLogins,
+ repoFullName: node.repository.nameWithOwner,
+ checkStatus: mapCheckStatus(rawState),
+ additions: node.additions,
+ deletions: node.deletions,
+ changedFiles: node.changedFiles,
+ comments: node.comments.totalCount,
+ reviewThreads: node.reviewThreads.totalCount,
+ labels: node.labels.nodes.map((l) => ({ name: l.name, color: l.color })),
+ reviewDecision: mapReviewDecision(node.reviewDecision),
+ totalReviewCount: node.latestReviews.totalCount,
+ });
+ return true;
+ }
+
+ // Run involves and review-requested searches across all repo chunks in parallel
+ const queryTypes = [
+ `is:pr is:open involves:${userLogin}`,
+ `is:pr is:open review-requested:${userLogin}`,
+ ];
+
+ const allTasks = queryTypes.flatMap((queryType) =>
+ chunks.map(async (chunk, chunkIdx) => {
+ const repoQualifiers = buildRepoQualifiers(chunk);
+ const queryString = `${queryType} ${repoQualifiers}`;
+ await paginateGraphQLSearch<GraphQLPRSearchResponse, GraphQLPRNode>(
+ octokit, PR_SEARCH_QUERY, queryString,
+ `pr-search-batch-${chunkIdx + 1}/${chunks.length}`,
+ errors, processPRNode, () => prMap.size, CAP,
+ );
+ })
+ );
+
+ const taskResults = await Promise.allSettled(allTasks);
+ for (const result of taskResults) {
+ if (result.status === "rejected") {
+ const { statusCode, message } = extractRejectionError(result.reason);
+ errors.push({ repo: "pr-search-batch", statusCode, message, retryable: statusCode === null || statusCode >= 500 });
}
}
- return { items, errors };
+ if (prMap.size >= CAP) {
+ console.warn(`[api] PR search results capped at ${CAP}`);
+ pushNotification("search/prs", `PR search results capped at 1,000 — some items are hidden`, "warning");
+ }
+
+ // Fork PR fallback: for PRs with null checkStatus where head repo owner differs from base
+ const forkCandidates: ForkCandidate[] = [];
+ for (const [databaseId, pr] of prMap) {
+ if (pr.checkStatus !== null) continue;
+ const headInfo = forkInfoMap.get(databaseId);
+ if (!headInfo) continue;
+ const baseOwner = pr.repoFullName.split("/")[0].toLowerCase();
+ if (headInfo.owner.toLowerCase() === baseOwner) continue;
+ forkCandidates.push({ databaseId, headOwner: headInfo.owner, headRepo: headInfo.repoName, sha: pr.headSha });
+ }
+
+ if (forkCandidates.length > 0) {
+ const forkChunks = chunkArray(forkCandidates, GRAPHQL_CHECK_BATCH_SIZE);
+ // Run fork fallback batches in parallel
+ await Promise.allSettled(forkChunks.map(async (forkChunk) => {
+ const varDefs: string[] = [];
+ const variables: Record<string, string> = {};
+ const fragments: string[] = [];
+
+ for (let i = 0; i < forkChunk.length; i++) {
+ varDefs.push(`$owner${i}: String!`, `$repo${i}: String!`, `$sha${i}: String!`);
+ variables[`owner${i}`] = forkChunk[i].headOwner;
+ variables[`repo${i}`] = forkChunk[i].headRepo;
+ variables[`sha${i}`] = forkChunk[i].sha;
+ fragments.push(
+ `fork${i}: repository(owner: $owner${i}, name: $repo${i}) {
+ object(expression: $sha${i}) {
+ ... on Commit {
+ statusCheckRollup { state }
+ }
+ }
+ }`
+ );
+ }
+
+ const forkQuery = `query(${varDefs.join(", ")}) {\n${fragments.join("\n")}\nrateLimit { remaining resetAt }\n}`;
+
+ try {
+ const forkResponse = await octokit.graphql<ForkQueryResponse>(forkQuery, variables);
+ if (forkResponse.rateLimit) updateGraphqlRateLimit(forkResponse.rateLimit as { remaining: number; resetAt: string });
+
+ for (let i = 0; i < forkChunk.length; i++) {
+ const data = forkResponse[`fork${i}`] as ForkRepoResult | null | undefined;
+ const state = data?.object?.statusCheckRollup?.state ?? null;
+ const pr = prMap.get(forkChunk[i].databaseId);
+ if (pr) pr.checkStatus = mapCheckStatus(state);
+ }
+ } catch (err) {
+ // Extract partial data from GraphqlResponseError — some fork aliases may have resolved
+ const partialData = (err && typeof err === "object" && "data" in err && err.data && typeof err.data === "object")
+ ? err.data as Record<string, ForkRepoResult | null | undefined>
+ : null;
+
+ if (partialData) {
+ for (let i = 0; i < forkChunk.length; i++) {
+ const data = partialData[`fork${i}`];
+ if (!data) continue;
+ const state = data.object?.statusCheckRollup?.state ?? null;
+ const pr = prMap.get(forkChunk[i].databaseId);
+ if (pr) pr.checkStatus = mapCheckStatus(state);
+ }
+ }
+
+ console.warn("[api] Fork PR statusCheckRollup fallback partially failed:", err);
+ pushNotification("graphql", "Fork PR check status unavailable — CI status may be missing for some PRs", "warning");
+ }
+ }));
+ }
+
+ const pullRequests = [...prMap.values()];
+ if (pullRequests.length >= CAP) pullRequests.splice(CAP);
+ return { pullRequests, errors };
}
// ── Step 1: fetchOrgs ────────────────────────────────────────────────────────
@@ -390,17 +778,6 @@ export async function fetchRepos(
const repos: RepoEntry[] = [];
- function collectRepos(page: RawRepo[], into: RepoEntry[]): void {
- for (const repo of page) {
- into.push({
- owner: repo.owner.login,
- name: repo.name,
- fullName: repo.full_name,
- pushedAt: repo.pushed_at ?? null,
- });
- }
- }
-
if (type === "org") {
for await (const response of octokit.paginate.iterator(`GET /orgs/{org}/repos`, {
org: orgOrUser,
@@ -408,7 +785,9 @@ export async function fetchRepos(
sort: "pushed" as const,
direction: "desc" as const,
})) {
- collectRepos(response.data as RawRepo[], repos);
+ for (const repo of response.data as RawRepo[]) {
+ repos.push({ owner: repo.owner.login, name: repo.name, fullName: repo.full_name, pushedAt: repo.pushed_at ?? null });
+ }
}
} else {
for await (const response of octokit.paginate.iterator(`GET /user/repos`, {
@@ -417,21 +796,20 @@ export async function fetchRepos(
sort: "pushed" as const,
direction: "desc" as const,
})) {
- collectRepos(response.data as RawRepo[], repos);
+ for (const repo of response.data as RawRepo[]) {
+ repos.push({ owner: repo.owner.login, name: repo.name, fullName: repo.full_name, pushedAt: repo.pushed_at ?? null });
+ }
}
}
return repos;
}
-// ── Step 3: fetchIssues (Search API) ─────────────────────────────────────────
+// ── Step 3: fetchIssues (GraphQL Search) ─────────────────────────────────────
/**
* Fetches open issues across repos where the user is involved (author, assignee,
- * mentioned, or commenter) using the GitHub Search API.
- *
- * Before: 3 API calls per repo (creator/assignee/mentioned) = 225 calls for 75 repos.
- * After: ~3 search calls total (batched in chunks of 30 repos).
+ * mentioned, or commenter) using GraphQL search queries, batched in chunks of 30 repos.
*/
export interface FetchIssuesResult {
issues: Issue[];
@@ -445,329 +823,11 @@ export async function fetchIssues(
): Promise<FetchIssuesResult> {
if (!octokit) throw new Error("No GitHub client available");
if (repos.length === 0 || !userLogin) return { issues: [], errors: [] };
-
- const { items, errors } = await batchedSearch(
- octokit,
- `is:issue is:open involves:${userLogin}`,
- repos
- );
-
- const issues = items
- .filter((item) => item.pull_request === undefined && getRepoFullName(item) != null)
- .map((item) => ({
- id: item.id,
- number: item.number,
- title: item.title,
- state: item.state,
- htmlUrl: item.html_url,
- createdAt: item.created_at,
- updatedAt: item.updated_at,
- userLogin: item.user?.login ?? "",
- userAvatarUrl: item.user?.avatar_url ?? "",
- labels: item.labels.map((l) => ({ name: l.name, color: l.color })),
- assigneeLogins: item.assignees.map((a) => a.login),
- repoFullName: getRepoFullName(item)!,
- comments: item.comments,
- }));
-
- return { issues, errors };
-}
-
-// ── Step 4: fetchPullRequests (Search API + GraphQL check status) ─────────────
-
-interface CheckStatusResult {
- checkStatus: CheckStatus["status"];
- reviewDecision: "APPROVED" | "CHANGES_REQUESTED" | "REVIEW_REQUIRED" | null;
- actualReviewerLogins: string[];
- totalReviewCount: number;
-}
-
-type GitHubOctokit = NonNullable>;
-
-/**
- * REST fallback for check status + reviews when GraphQL is unavailable.
- * Uses the core REST rate limit (5000/hr, separate from GraphQL 5000 pts/hr).
- * All requests go through cachedRequest for ETag-based caching.
- *
- * Fetches both the legacy Status API and the Check Runs API in parallel, then
- * combines their results so GitHub Actions workflows (which use Check Runs) are
- * correctly reflected. This makes REST a full-fidelity fallback for GraphQL.
- */
-async function restFallbackCheckStatuses(
- octokit: GitHubOctokit,
- prs: { owner: string; repo: string; sha: string; prNumber: number }[],
-  results: Map<string, CheckStatusResult>
- ): Promise<void> {
- // Process in chunks of 10 to avoid overwhelming the browser's 6-connection limit
- const REST_CONCURRENCY = 10;
- const chunks = chunkArray(prs, REST_CONCURRENCY);
- for (const chunk of chunks) {
- const tasks = chunk.map(async (pr) => {
- const key = `${pr.owner}/${pr.repo}:${pr.sha}`;
- try {
- // Fetch legacy Status API, Check Runs API, and PR reviews in parallel
- const [statusResult, checkRunsResult, reviewsResult] = await Promise.all([
- cachedRequest(
- octokit,
- `rest-status:${key}`,
- "GET /repos/{owner}/{repo}/commits/{ref}/status",
- { owner: pr.owner, repo: pr.repo, ref: pr.sha }
- ),
- cachedRequest(
- octokit,
- `rest-check-runs:${key}`,
- "GET /repos/{owner}/{repo}/commits/{ref}/check-runs",
- { owner: pr.owner, repo: pr.repo, ref: pr.sha }
- ),
- cachedRequest(
- octokit,
- `rest-reviews:${pr.owner}/${pr.repo}:${pr.prNumber}`,
- "GET /repos/{owner}/{repo}/pulls/{pull_number}/reviews",
- { owner: pr.owner, repo: pr.repo, pull_number: pr.prNumber }
- ),
- ]);
-
- const statusData = statusResult.data as { state: string; total_count: number };
- const checkRunsData = checkRunsResult.data as {
- check_runs: { status: string; conclusion: string | null }[];
- };
- const reviews = reviewsResult.data as { user: { login: string } | null; state: string }[];
-
- // Derive combined check status from both endpoints.
- // Status API returns state:"pending" with total_count:0 when no statuses exist.
- // Check Runs API returns an empty array when no check runs exist.
- // If BOTH are empty → no CI configured → null.
- const noLegacyStatuses = statusData.total_count === 0;
- const noCheckRuns = checkRunsData.check_runs.length === 0;
-
- let checkStatus: CheckStatus["status"];
- if (noLegacyStatuses && noCheckRuns) {
- checkStatus = null;
- } else {
- const legacyFailed =
- statusData.state === "failure" || statusData.state === "error";
- const checkRunFailed = checkRunsData.check_runs.some(
- (cr) => cr.conclusion === "failure" || cr.conclusion === "timed_out" || cr.conclusion === "cancelled"
- );
-
- if (legacyFailed || checkRunFailed) {
- checkStatus = "failure";
- } else {
- const legacySuccess = statusData.state === "success" || noLegacyStatuses;
- const allCheckRunsComplete = noCheckRuns ||
- checkRunsData.check_runs.every((cr) => cr.status === "completed");
- const allCheckRunsSuccess = checkRunsData.check_runs.every(
- (cr) => cr.conclusion === "success" || cr.conclusion === "skipped" || cr.conclusion === "neutral"
- );
-
- if (legacySuccess && allCheckRunsComplete && allCheckRunsSuccess) {
- checkStatus = "success";
- } else {
- checkStatus = "pending";
- }
- }
- }
-
- // Derive review decision from latest review per author.
- // Include COMMENTED to make REVIEW_REQUIRED reachable (comments without approval).
- const latestByAuthor = new Map<string, string>();
- for (const review of reviews) {
- if (review.user?.login && (review.state === "APPROVED" || review.state === "CHANGES_REQUESTED" || review.state === "COMMENTED")) {
- latestByAuthor.set(review.user.login.toLowerCase(), review.state);
- }
- }
- let reviewDecision: CheckStatusResult["reviewDecision"] = null;
- if (latestByAuthor.size > 0) {
- const states = [...latestByAuthor.values()];
- if (states.some((s) => s === "CHANGES_REQUESTED")) reviewDecision = "CHANGES_REQUESTED";
- else if (states.every((s) => s === "APPROVED")) reviewDecision = "APPROVED";
- else reviewDecision = "REVIEW_REQUIRED";
- }
-
- const actualReviewerLogins = reviews
- .filter((r) => r.user?.login)
- .map((r) => r.user!.login);
- // Deduplicate reviewer logins
- const uniqueReviewers = [...new Set(actualReviewerLogins)];
-
- results.set(key, { checkStatus, reviewDecision, actualReviewerLogins: uniqueReviewers, totalReviewCount: reviews.length });
- } catch (err) {
- console.warn(`[api] REST fallback failed for ${key}:`, err);
- results.set(key, { checkStatus: null, reviewDecision: null, actualReviewerLogins: [], totalReviewCount: 0 });
- }
- });
-
- await Promise.allSettled(tasks);
- }
+ return graphqlSearchIssues(octokit, repos, userLogin);
}
-/**
- * Batches check status lookups into a single GraphQL call using
- * `statusCheckRollup.state`, which combines both legacy commit status API
- * and modern check runs into one field.
- *
- * Replaces 2N REST calls (commit status + check runs) with 1 GraphQL call.
- * Uses parameterized variables to prevent injection.
- *
- * For fork PRs, `pr.head.sha` exists only in the fork repo, not the base repo.
- * The `object(expression:)` lookup must use the head repo (fork), while
- * `pullRequest(number:)` must use the base repo. We handle this by emitting a
- * separate `objRepo${i}` alias pointing at the head repo when it differs from
- * the base repo, and reusing the base repo alias otherwise.
- */
-async function batchFetchCheckStatuses(
- octokit: NonNullable>,
- prs: { owner: string; repo: string; sha: string; prNumber: number }[]
-): Promise