diff --git a/.size-limit.js b/.size-limit.js index 32d5d19e1495..a86e2b0bdb2c 100644 --- a/.size-limit.js +++ b/.size-limit.js @@ -103,7 +103,7 @@ module.exports = [ path: 'packages/browser/build/npm/esm/index.js', import: createImport('init', 'feedbackAsyncIntegration'), gzip: true, - limit: '34 KB', + limit: '35 KB', }, // React SDK (ESM) { diff --git a/dev-packages/browser-integration-tests/suites/tracing/ai-providers/anthropic/init.js b/dev-packages/browser-integration-tests/suites/tracing/ai-providers/anthropic/init.js new file mode 100644 index 000000000000..d90a3acf6157 --- /dev/null +++ b/dev-packages/browser-integration-tests/suites/tracing/ai-providers/anthropic/init.js @@ -0,0 +1,9 @@ +import * as Sentry from '@sentry/browser'; + +window.Sentry = Sentry; + +Sentry.init({ + dsn: 'https://public@dsn.ingest.sentry.io/1337', + tracesSampleRate: 1, + debug: true, +}); diff --git a/dev-packages/browser-integration-tests/suites/tracing/ai-providers/anthropic/mocks.js b/dev-packages/browser-integration-tests/suites/tracing/ai-providers/anthropic/mocks.js new file mode 100644 index 000000000000..01c6c31ce596 --- /dev/null +++ b/dev-packages/browser-integration-tests/suites/tracing/ai-providers/anthropic/mocks.js @@ -0,0 +1,55 @@ +// Mock Anthropic client for browser testing +export class MockAnthropic { + constructor(config) { + this.apiKey = config.apiKey; + + // Main focus: messages.create functionality + this.messages = { + create: async (...args) => { + const params = args[0]; + // Simulate processing time + await new Promise(resolve => setTimeout(resolve, 10)); + + if (params.model === 'error-model') { + const error = new Error('Model not found'); + error.status = 404; + error.headers = { 'x-request-id': 'mock-request-123' }; + throw error; + } + + const response = { + id: 'msg_mock123', + type: 'message', + role: 'assistant', + model: params.model, + content: [ + { + type: 'text', + text: 'Hello from Anthropic mock!', + }, + ], + stop_reason: 'end_turn', + stop_sequence: null, + usage: { + input_tokens: 10, + output_tokens: 15, + cache_creation_input_tokens: 0, + cache_read_input_tokens: 0, + }, + }; + return response; + }, + countTokens: async (..._args) => ({ id: 'mock', type: 'model', model: 'mock', input_tokens: 0 }), + }; + + // Minimal implementations for required interface compliance + this.models = { + list: async (..._args) => ({ id: 'mock', type: 'model', model: 'mock' }), + get: async (..._args) => ({ id: 'mock', type: 'model', model: 'mock' }), + }; + + this.completions = { + create: async (..._args) => ({ id: 'mock', type: 'completion', model: 'mock' }), + }; + } +} diff --git a/dev-packages/browser-integration-tests/suites/tracing/ai-providers/anthropic/subject.js b/dev-packages/browser-integration-tests/suites/tracing/ai-providers/anthropic/subject.js new file mode 100644 index 000000000000..febfe938139e --- /dev/null +++ b/dev-packages/browser-integration-tests/suites/tracing/ai-providers/anthropic/subject.js @@ -0,0 +1,19 @@ +import { instrumentAnthropicAiClient } from '@sentry/browser'; +import { MockAnthropic } from './mocks.js'; + +const mockClient = new MockAnthropic({ + apiKey: 'mock-api-key', +}); + +const client = instrumentAnthropicAiClient(mockClient); + +// Test that manual instrumentation doesn't crash the browser +// The instrumentation automatically creates spans +const response = await client.messages.create({ + model: 'claude-3-haiku-20240307', + messages: [{ role: 'user', content: 'What is the capital of France?' 
}], + temperature: 0.7, + max_tokens: 100, +}); + +console.log('Received response', response); diff --git a/dev-packages/browser-integration-tests/suites/tracing/ai-providers/anthropic/test.ts b/dev-packages/browser-integration-tests/suites/tracing/ai-providers/anthropic/test.ts new file mode 100644 index 000000000000..206e29be16e5 --- /dev/null +++ b/dev-packages/browser-integration-tests/suites/tracing/ai-providers/anthropic/test.ts @@ -0,0 +1,36 @@ +import { expect } from '@playwright/test'; +import { sentryTest } from '../../../../utils/fixtures'; +import { envelopeRequestParser, waitForTransactionRequest } from '../../../../utils/helpers'; + +// These tests are not exhaustive because the instrumentation is +// already tested in the node integration tests and we merely +// want to test that the instrumentation does not crash in the browser +// and that gen_ai transactions are sent. + +sentryTest('manual Anthropic instrumentation sends gen_ai transactions', async ({ getLocalTestUrl, page }) => { + const transactionPromise = waitForTransactionRequest(page, event => { + return !!event.transaction?.includes('claude-3-haiku-20240307'); + }); + + const url = await getLocalTestUrl({ testDir: __dirname }); + await page.goto(url); + + const req = await transactionPromise; + + const eventData = envelopeRequestParser(req); + + // Verify it's a gen_ai transaction + expect(eventData.transaction).toBe('messages claude-3-haiku-20240307'); + expect(eventData.contexts?.trace?.op).toBe('gen_ai.messages'); + expect(eventData.contexts?.trace?.origin).toBe('auto.ai.anthropic'); + expect(eventData.contexts?.trace?.data).toMatchObject({ + 'gen_ai.operation.name': 'messages', + 'gen_ai.system': 'anthropic', + 'gen_ai.request.model': 'claude-3-haiku-20240307', + 'gen_ai.request.temperature': 0.7, + 'gen_ai.response.model': 'claude-3-haiku-20240307', + 'gen_ai.response.id': 'msg_mock123', + 'gen_ai.usage.input_tokens': 10, + 'gen_ai.usage.output_tokens': 15, + }); +}); diff --git a/dev-packages/browser-integration-tests/suites/tracing/ai-providers/google-genai/init.js b/dev-packages/browser-integration-tests/suites/tracing/ai-providers/google-genai/init.js new file mode 100644 index 000000000000..d90a3acf6157 --- /dev/null +++ b/dev-packages/browser-integration-tests/suites/tracing/ai-providers/google-genai/init.js @@ -0,0 +1,9 @@ +import * as Sentry from '@sentry/browser'; + +window.Sentry = Sentry; + +Sentry.init({ + dsn: 'https://public@dsn.ingest.sentry.io/1337', + tracesSampleRate: 1, + debug: true, +}); diff --git a/dev-packages/browser-integration-tests/suites/tracing/ai-providers/google-genai/mocks.js b/dev-packages/browser-integration-tests/suites/tracing/ai-providers/google-genai/mocks.js new file mode 100644 index 000000000000..8aab37fb3a1e --- /dev/null +++ b/dev-packages/browser-integration-tests/suites/tracing/ai-providers/google-genai/mocks.js @@ -0,0 +1,118 @@ +// Mock Google GenAI client for browser testing +export class MockGoogleGenAI { + constructor(config) { + this.apiKey = config.apiKey; + + // models.generateContent functionality + this.models = { + generateContent: async (...args) => { + const params = args[0]; + // Simulate processing time + await new Promise(resolve => setTimeout(resolve, 10)); + + if (params.model === 'error-model') { + const error = new Error('Model not found'); + error.status = 404; + error.headers = { 'x-request-id': 'mock-request-123' }; + throw error; + } + + return { + candidates: [ + { + content: { + parts: [ + { + text: 'Hello from Google GenAI mock!', + }, + 
],
+                role: 'model',
+              },
+              finishReason: 'stop',
+              index: 0,
+            },
+          ],
+          usageMetadata: {
+            promptTokenCount: 8,
+            candidatesTokenCount: 12,
+            totalTokenCount: 20,
+          },
+        };
+      },
+      generateContentStream: async () => {
+        // Return a promise that resolves to an async generator
+        return (async function* () {
+          yield {
+            candidates: [
+              {
+                content: {
+                  parts: [{ text: 'Streaming response' }],
+                  role: 'model',
+                },
+                finishReason: 'stop',
+                index: 0,
+              },
+            ],
+          };
+        })();
+      },
+    };
+
+    // chats.create implementation
+    this.chats = {
+      create: (...args) => {
+        const params = args[0];
+        const model = params.model;
+
+        return {
+          modelVersion: model,
+          sendMessage: async (..._messageArgs) => {
+            // Simulate processing time
+            await new Promise(resolve => setTimeout(resolve, 10));
+
+            const response = {
+              candidates: [
+                {
+                  content: {
+                    parts: [
+                      {
+                        text: 'This is a joke from the chat!',
+                      },
+                    ],
+                    role: 'model',
+                  },
+                  finishReason: 'stop',
+                  index: 0,
+                },
+              ],
+              usageMetadata: {
+                promptTokenCount: 8,
+                candidatesTokenCount: 12,
+                totalTokenCount: 20,
+              },
+              modelVersion: model, // Include model version in response
+            };
+            return response;
+          },
+          sendMessageStream: async () => {
+            // Return a promise that resolves to an async generator
+            return (async function* () {
+              yield {
+                candidates: [
+                  {
+                    content: {
+                      parts: [{ text: 'Streaming chat response' }],
+                      role: 'model',
+                    },
+                    finishReason: 'stop',
+                    index: 0,
+                  },
+                ],
+              };
+            })();
+          },
+        };
+      },
+    };
+  }
+}
diff --git a/dev-packages/browser-integration-tests/suites/tracing/ai-providers/google-genai/subject.js b/dev-packages/browser-integration-tests/suites/tracing/ai-providers/google-genai/subject.js
new file mode 100644
index 000000000000..14b95f2b6942
--- /dev/null
+++ b/dev-packages/browser-integration-tests/suites/tracing/ai-providers/google-genai/subject.js
@@ -0,0 +1,32 @@
+import { instrumentGoogleGenAIClient } from '@sentry/browser';
+import { MockGoogleGenAI } from './mocks.js';
+
+const mockClient = new MockGoogleGenAI({
+  apiKey: 'mock-api-key',
+});
+
+const client = instrumentGoogleGenAIClient(mockClient);
+
+// Test that manual instrumentation doesn't crash the browser
+// The instrumentation automatically creates spans
+// Only the chats API is exercised here; the models API is covered by the node integration tests
+const chat = client.chats.create({
+  model: 'gemini-1.5-pro',
+  config: {
+    temperature: 0.8,
+    topP: 0.9,
+    maxOutputTokens: 150,
+  },
+  history: [
+    {
+      role: 'user',
+      parts: [{ text: 'Hello, how are you?' }],
+    },
+  ],
+});
+
+const response = await chat.sendMessage({
+  message: 'Tell me a joke',
+});
+
+console.log('Received response', response);
diff --git a/dev-packages/browser-integration-tests/suites/tracing/ai-providers/google-genai/test.ts b/dev-packages/browser-integration-tests/suites/tracing/ai-providers/google-genai/test.ts
new file mode 100644
index 000000000000..6774129f183e
--- /dev/null
+++ b/dev-packages/browser-integration-tests/suites/tracing/ai-providers/google-genai/test.ts
@@ -0,0 +1,31 @@
+import { expect } from '@playwright/test';
+import { sentryTest } from '../../../../utils/fixtures';
+import { envelopeRequestParser, waitForTransactionRequest } from '../../../../utils/helpers';
+
+// These tests are not exhaustive because the instrumentation is
+// already tested in the node integration tests and we merely
+// want to test that the instrumentation does not crash in the browser
+// and that gen_ai transactions are sent.
+ +sentryTest('manual Google GenAI instrumentation sends gen_ai transactions', async ({ getLocalTestUrl, page }) => { + const transactionPromise = waitForTransactionRequest(page, event => { + return !!event.transaction?.includes('gemini-1.5-pro'); + }); + + const url = await getLocalTestUrl({ testDir: __dirname }); + await page.goto(url); + + const req = await transactionPromise; + + const eventData = envelopeRequestParser(req); + + // Verify it's a gen_ai transaction + expect(eventData.transaction).toBe('chat gemini-1.5-pro create'); + expect(eventData.contexts?.trace?.op).toBe('gen_ai.chat'); + expect(eventData.contexts?.trace?.origin).toBe('auto.ai.google_genai'); + expect(eventData.contexts?.trace?.data).toMatchObject({ + 'gen_ai.operation.name': 'chat', + 'gen_ai.system': 'google_genai', + 'gen_ai.request.model': 'gemini-1.5-pro', + }); +}); diff --git a/dev-packages/browser-integration-tests/suites/tracing/ai-providers/openai/init.js b/dev-packages/browser-integration-tests/suites/tracing/ai-providers/openai/init.js new file mode 100644 index 000000000000..d90a3acf6157 --- /dev/null +++ b/dev-packages/browser-integration-tests/suites/tracing/ai-providers/openai/init.js @@ -0,0 +1,9 @@ +import * as Sentry from '@sentry/browser'; + +window.Sentry = Sentry; + +Sentry.init({ + dsn: 'https://public@dsn.ingest.sentry.io/1337', + tracesSampleRate: 1, + debug: true, +}); diff --git a/dev-packages/browser-integration-tests/suites/tracing/ai-providers/openai/mocks.js b/dev-packages/browser-integration-tests/suites/tracing/ai-providers/openai/mocks.js new file mode 100644 index 000000000000..a1fe56dd30c2 --- /dev/null +++ b/dev-packages/browser-integration-tests/suites/tracing/ai-providers/openai/mocks.js @@ -0,0 +1,47 @@ +// Mock OpenAI client for browser testing +export class MockOpenAi { + constructor(config) { + this.apiKey = config.apiKey; + + this.chat = { + completions: { + create: async (...args) => { + const params = args[0]; + // Simulate processing time + await new Promise(resolve => setTimeout(resolve, 10)); + + if (params.model === 'error-model') { + const error = new Error('Model not found'); + error.status = 404; + error.headers = { 'x-request-id': 'mock-request-123' }; + throw error; + } + + const response = { + id: 'chatcmpl-mock123', + object: 'chat.completion', + created: 1677652288, + model: params.model, + system_fingerprint: 'fp_44709d6fcb', + choices: [ + { + index: 0, + message: { + role: 'assistant', + content: 'Hello from OpenAI mock!', + }, + finish_reason: 'stop', + }, + ], + usage: { + prompt_tokens: 10, + completion_tokens: 15, + total_tokens: 25, + }, + }; + return response; + }, + }, + }; + } +} diff --git a/dev-packages/browser-integration-tests/suites/tracing/ai-providers/openai/subject.js b/dev-packages/browser-integration-tests/suites/tracing/ai-providers/openai/subject.js new file mode 100644 index 000000000000..aadc2864ceee --- /dev/null +++ b/dev-packages/browser-integration-tests/suites/tracing/ai-providers/openai/subject.js @@ -0,0 +1,22 @@ +import { instrumentOpenAiClient } from '@sentry/browser'; +import { MockOpenAi } from './mocks.js'; + +const mockClient = new MockOpenAi({ + apiKey: 'mock-api-key', +}); + +const client = instrumentOpenAiClient(mockClient); + +// Test that manual instrumentation doesn't crash the browser +// The instrumentation automatically creates spans +const response = await client.chat.completions.create({ + model: 'gpt-3.5-turbo', + messages: [ + { role: 'system', content: 'You are a helpful assistant.' 
},
+    { role: 'user', content: 'What is the capital of France?' },
+  ],
+  temperature: 0.7,
+  max_tokens: 100,
+});
+
+console.log('Received response', response);
diff --git a/dev-packages/browser-integration-tests/suites/tracing/ai-providers/openai/test.ts b/dev-packages/browser-integration-tests/suites/tracing/ai-providers/openai/test.ts
new file mode 100644
index 000000000000..c71c0786ff96
--- /dev/null
+++ b/dev-packages/browser-integration-tests/suites/tracing/ai-providers/openai/test.ts
@@ -0,0 +1,37 @@
+import { expect } from '@playwright/test';
+import { sentryTest } from '../../../../utils/fixtures';
+import { envelopeRequestParser, waitForTransactionRequest } from '../../../../utils/helpers';
+
+// These tests are not exhaustive because the instrumentation is
+// already tested in the node integration tests and we merely
+// want to test that the instrumentation does not crash in the browser
+// and that gen_ai transactions are sent.
+
+sentryTest('manual OpenAI instrumentation sends gen_ai transactions', async ({ getLocalTestUrl, page }) => {
+  const transactionPromise = waitForTransactionRequest(page, event => {
+    return !!event.transaction?.includes('gpt-3.5-turbo');
+  });
+
+  const url = await getLocalTestUrl({ testDir: __dirname });
+  await page.goto(url);
+
+  const req = await transactionPromise;
+
+  const eventData = envelopeRequestParser(req);
+
+  // Verify it's a gen_ai transaction
+  expect(eventData.transaction).toBe('chat gpt-3.5-turbo');
+  expect(eventData.contexts?.trace?.op).toBe('gen_ai.chat');
+  expect(eventData.contexts?.trace?.origin).toBe('auto.ai.openai');
+  expect(eventData.contexts?.trace?.data).toMatchObject({
+    'gen_ai.operation.name': 'chat',
+    'gen_ai.system': 'openai',
+    'gen_ai.request.model': 'gpt-3.5-turbo',
+    'gen_ai.request.temperature': 0.7,
+    'gen_ai.response.model': 'gpt-3.5-turbo',
+    'gen_ai.response.id': 'chatcmpl-mock123',
+    'gen_ai.usage.input_tokens': 10,
+    'gen_ai.usage.output_tokens': 15,
+    'gen_ai.usage.total_tokens': 25,
+  });
+});
diff --git a/dev-packages/browser-integration-tests/utils/generatePlugin.ts b/dev-packages/browser-integration-tests/utils/generatePlugin.ts
index bd505473f9b7..0a90b5e2be23 100644
--- a/dev-packages/browser-integration-tests/utils/generatePlugin.ts
+++ b/dev-packages/browser-integration-tests/utils/generatePlugin.ts
@@ -37,6 +37,9 @@ const IMPORTED_INTEGRATION_CDN_BUNDLE_PATHS: Record<string, string> = {
   moduleMetadataIntegration: 'modulemetadata',
   graphqlClientIntegration: 'graphqlclient',
   browserProfilingIntegration: 'browserprofiling',
+  instrumentAnthropicAiClient: 'instrumentanthropicaiclient',
+  instrumentOpenAiClient: 'instrumentopenaiclient',
+  instrumentGoogleGenAIClient: 'instrumentgooglegenaiclient',
   // technically, this is not an integration, but let's add it anyway for simplicity
   makeMultiplexedTransport: 'multiplexedtransport',
 };
diff --git a/packages/browser/rollup.bundle.config.mjs b/packages/browser/rollup.bundle.config.mjs
index 705ec3dfe1c1..4893e66f49ef 100644
--- a/packages/browser/rollup.bundle.config.mjs
+++ b/packages/browser/rollup.bundle.config.mjs
@@ -13,6 +13,9 @@ const reexportedPluggableIntegrationFiles = [
   'modulemetadata',
   'graphqlclient',
   'spotlight',
+  'instrumentanthropicaiclient',
+  'instrumentopenaiclient',
+  'instrumentgooglegenaiclient',
 ];
 
 browserPluggableIntegrationFiles.forEach(integrationName => {
diff --git a/packages/browser/src/index.ts b/packages/browser/src/index.ts
index 5e9924fe6da5..ae13e984c85f 100644
--- a/packages/browser/src/index.ts
+++ b/packages/browser/src/index.ts
@@ -63,6 +63,9 @@ export { zodErrorsIntegration, thirdPartyErrorFilterIntegration, featureFlagsIntegration, + instrumentAnthropicAiClient, + instrumentOpenAiClient, + instrumentGoogleGenAIClient, logger, } from '@sentry/core'; export type { Span, FeatureFlagsIntegration } from '@sentry/core'; diff --git a/packages/browser/src/integrations-bundle/index.instrumentanthropicaiclient.ts b/packages/browser/src/integrations-bundle/index.instrumentanthropicaiclient.ts new file mode 100644 index 000000000000..d82909a524d8 --- /dev/null +++ b/packages/browser/src/integrations-bundle/index.instrumentanthropicaiclient.ts @@ -0,0 +1 @@ +export { instrumentAnthropicAiClient } from '@sentry/core'; diff --git a/packages/browser/src/integrations-bundle/index.instrumentgooglegenaiclient.ts b/packages/browser/src/integrations-bundle/index.instrumentgooglegenaiclient.ts new file mode 100644 index 000000000000..ec58139c0681 --- /dev/null +++ b/packages/browser/src/integrations-bundle/index.instrumentgooglegenaiclient.ts @@ -0,0 +1 @@ +export { instrumentGoogleGenAIClient } from '@sentry/core'; diff --git a/packages/browser/src/integrations-bundle/index.instrumentopenaiclient.ts b/packages/browser/src/integrations-bundle/index.instrumentopenaiclient.ts new file mode 100644 index 000000000000..5371961ff03a --- /dev/null +++ b/packages/browser/src/integrations-bundle/index.instrumentopenaiclient.ts @@ -0,0 +1 @@ +export { instrumentOpenAiClient } from '@sentry/core'; diff --git a/packages/browser/src/utils/lazyLoadIntegration.ts b/packages/browser/src/utils/lazyLoadIntegration.ts index 569e902fde28..6d5e48542f56 100644 --- a/packages/browser/src/utils/lazyLoadIntegration.ts +++ b/packages/browser/src/utils/lazyLoadIntegration.ts @@ -21,6 +21,9 @@ const LazyLoadableIntegrations = { rewriteFramesIntegration: 'rewriteframes', browserProfilingIntegration: 'browserprofiling', moduleMetadataIntegration: 'modulemetadata', + instrumentAnthropicAiClient: 'instrumentanthropicaiclient', + instrumentOpenAiClient: 'instrumentopenaiclient', + instrumentGoogleGenAIClient: 'instrumentgooglegenaiclient', } as const; const WindowWithMaybeIntegration = WINDOW as {
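
Usage note (reviewer sketch, not part of the diff): the three instrument*Client helpers re-exported above wrap an existing provider client and emit gen_ai spans for calls made through the wrapper, which is exactly what the mock-based subjects exercise. A minimal sketch against a real client follows; '@anthropic-ai/sdk' and its dangerouslyAllowBrowser flag are assumptions about the provider SDK, not part of this change.

// Hypothetical wiring in an app module (top-level await assumes ESM).
import Anthropic from '@anthropic-ai/sdk';
import * as Sentry from '@sentry/browser';

Sentry.init({
  dsn: '__YOUR_DSN__',
  tracesSampleRate: 1,
});

// instrumentAnthropicAiClient returns a wrapped client; calls through it
// create spans with op gen_ai.messages and origin auto.ai.anthropic,
// matching the assertions in the anthropic test above.
const client = Sentry.instrumentAnthropicAiClient(
  new Anthropic({ apiKey: '__YOUR_KEY__', dangerouslyAllowBrowser: true }),
);

const response = await client.messages.create({
  model: 'claude-3-haiku-20240307',
  max_tokens: 100,
  messages: [{ role: 'user', content: 'Hello!' }],
});

The same three names are also registered for CDN users: generatePlugin.ts and rollup.bundle.config.mjs produce standalone instrument*client bundles, and the LazyLoadableIntegrations entries let lazyLoadIntegration pull them in on demand instead of growing the core bundle.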