diff --git a/dev-packages/node-integration-tests/suites/tracing/vercelai/scenario.mjs b/dev-packages/node-integration-tests/suites/tracing/vercelai/scenario.mjs
index 9df798eed59e..9bfdd4a9793a 100644
--- a/dev-packages/node-integration-tests/suites/tracing/vercelai/scenario.mjs
+++ b/dev-packages/node-integration-tests/suites/tracing/vercelai/scenario.mjs
@@ -1,6 +1,7 @@
 import * as Sentry from '@sentry/node';
 import { generateText } from 'ai';
 import { MockLanguageModelV1 } from 'ai/test';
+import { z } from 'zod';
 
 async function run() {
   await Sentry.startSpan({ op: 'function', name: 'main' }, async () => {
@@ -30,6 +31,35 @@ async function run() {
       prompt: 'Where is the second span?',
     });
 
+    // This span should include tool calls and tool results
+    await generateText({
+      model: new MockLanguageModelV1({
+        doGenerate: async () => ({
+          rawCall: { rawPrompt: null, rawSettings: {} },
+          finishReason: 'tool-calls',
+          usage: { promptTokens: 15, completionTokens: 25 },
+          text: 'Tool call completed!',
+          toolCalls: [
+            {
+              toolCallType: 'function',
+              toolCallId: 'call-1',
+              toolName: 'getWeather',
+              args: '{ "location": "San Francisco" }',
+            },
+          ],
+        }),
+      }),
+      tools: {
+        getWeather: {
+          parameters: z.object({ location: z.string() }),
+          execute: async args => {
+            return `Weather in ${args.location}: Sunny, 72°F`;
+          },
+        },
+      },
+      prompt: 'What is the weather in San Francisco?',
+    });
+
     // This span should not be captured because we've disabled telemetry
     await generateText({
       experimental_telemetry: { isEnabled: false },
diff --git a/dev-packages/node-integration-tests/suites/tracing/vercelai/test.ts b/dev-packages/node-integration-tests/suites/tracing/vercelai/test.ts
index 7876dbccb440..fdeec051389f 100644
--- a/dev-packages/node-integration-tests/suites/tracing/vercelai/test.ts
+++ b/dev-packages/node-integration-tests/suites/tracing/vercelai/test.ts
@@ -26,11 +26,11 @@ describe('Vercel AI integration', () => {
           'gen_ai.usage.output_tokens': 20,
           'gen_ai.usage.total_tokens': 30,
           'operation.name': 'ai.generateText',
-          'sentry.op': 'ai.pipeline.generateText',
+          'sentry.op': 'ai.pipeline.generate_text',
           'sentry.origin': 'auto.vercelai.otel',
         },
         description: 'generateText',
-        op: 'ai.pipeline.generateText',
+        op: 'ai.pipeline.generate_text',
         origin: 'auto.vercelai.otel',
         status: 'ok',
       }),
@@ -38,7 +38,7 @@ describe('Vercel AI integration', () => {
       expect.objectContaining({
         data: {
           'sentry.origin': 'auto.vercelai.otel',
-          'sentry.op': 'ai.run.doGenerate',
+          'sentry.op': 'gen_ai.generate_text',
           'operation.name': 'ai.generateText.doGenerate',
           'ai.operationId': 'ai.generateText.doGenerate',
           'ai.model.provider': 'mock-provider',
@@ -59,8 +59,8 @@ describe('Vercel AI integration', () => {
           'gen_ai.response.model': 'mock-model-id',
           'gen_ai.usage.total_tokens': 30,
         },
-        description: 'generateText.doGenerate',
-        op: 'ai.run.doGenerate',
+        description: 'generate_text mock-model-id',
+        op: 'gen_ai.generate_text',
         origin: 'auto.vercelai.otel',
         status: 'ok',
       }),
@@ -83,11 +83,11 @@ describe('Vercel AI integration', () => {
           'gen_ai.usage.output_tokens': 20,
           'gen_ai.usage.total_tokens': 30,
           'operation.name': 'ai.generateText',
-          'sentry.op': 'ai.pipeline.generateText',
+          'sentry.op': 'ai.pipeline.generate_text',
           'sentry.origin': 'auto.vercelai.otel',
         },
         description: 'generateText',
-        op: 'ai.pipeline.generateText',
+        op: 'ai.pipeline.generate_text',
         origin: 'auto.vercelai.otel',
         status: 'ok',
       }),
@@ -95,7 +95,7 @@ describe('Vercel AI integration', () => {
       expect.objectContaining({
         data: {
           'sentry.origin': 'auto.vercelai.otel',
-          'sentry.op': 'ai.run.doGenerate',
+          'sentry.op': 'gen_ai.generate_text',
           'operation.name': 'ai.generateText.doGenerate',
           'ai.operationId': 'ai.generateText.doGenerate',
           'ai.model.provider': 'mock-provider',
@@ -119,8 +119,79 @@ describe('Vercel AI integration', () => {
           'gen_ai.response.model': 'mock-model-id',
           'gen_ai.usage.total_tokens': 30,
         },
-        description: 'generateText.doGenerate',
-        op: 'ai.run.doGenerate',
+        description: 'generate_text mock-model-id',
+        op: 'gen_ai.generate_text',
+        origin: 'auto.vercelai.otel',
+        status: 'ok',
+      }),
+      // Fifth span - tool call generateText span
+      expect.objectContaining({
+        data: {
+          'ai.model.id': 'mock-model-id',
+          'ai.model.provider': 'mock-provider',
+          'ai.operationId': 'ai.generateText',
+          'ai.pipeline.name': 'generateText',
+          'ai.response.finishReason': 'tool-calls',
+          'ai.settings.maxRetries': 2,
+          'ai.settings.maxSteps': 1,
+          'ai.streaming': false,
+          'gen_ai.response.model': 'mock-model-id',
+          'gen_ai.usage.input_tokens': 15,
+          'gen_ai.usage.output_tokens': 25,
+          'gen_ai.usage.total_tokens': 40,
+          'operation.name': 'ai.generateText',
+          'sentry.op': 'ai.pipeline.generate_text',
+          'sentry.origin': 'auto.vercelai.otel',
+        },
+        description: 'generateText',
+        op: 'ai.pipeline.generate_text',
+        origin: 'auto.vercelai.otel',
+        status: 'ok',
+      }),
+      // Sixth span - tool call doGenerate span
+      expect.objectContaining({
+        data: {
+          'ai.model.id': 'mock-model-id',
+          'ai.model.provider': 'mock-provider',
+          'ai.operationId': 'ai.generateText.doGenerate',
+          'ai.pipeline.name': 'generateText.doGenerate',
+          'ai.response.finishReason': 'tool-calls',
+          'ai.response.id': expect.any(String),
+          'ai.response.model': 'mock-model-id',
+          'ai.response.timestamp': expect.any(String),
+          'ai.settings.maxRetries': 2,
+          'ai.streaming': false,
+          'gen_ai.request.model': 'mock-model-id',
+          'gen_ai.response.finish_reasons': ['tool-calls'],
+          'gen_ai.response.id': expect.any(String),
+          'gen_ai.response.model': 'mock-model-id',
+          'gen_ai.system': 'mock-provider',
+          'gen_ai.usage.input_tokens': 15,
+          'gen_ai.usage.output_tokens': 25,
+          'gen_ai.usage.total_tokens': 40,
+          'operation.name': 'ai.generateText.doGenerate',
+          'sentry.op': 'gen_ai.generate_text',
+          'sentry.origin': 'auto.vercelai.otel',
+        },
+        description: 'generate_text mock-model-id',
+        op: 'gen_ai.generate_text',
+        origin: 'auto.vercelai.otel',
+        status: 'ok',
+      }),
+      // Seventh span - tool call execution span
+      expect.objectContaining({
+        data: {
+          'ai.operationId': 'ai.toolCall',
+          'ai.toolCall.id': 'call-1',
+          'ai.toolCall.name': 'getWeather',
+          'gen_ai.tool.call.id': 'call-1',
+          'gen_ai.tool.name': 'getWeather',
+          'operation.name': 'ai.toolCall',
+          'sentry.op': 'gen_ai.execute_tool',
+          'sentry.origin': 'auto.vercelai.otel',
+        },
+        description: 'execute_tool getWeather',
+        op: 'gen_ai.execute_tool',
         origin: 'auto.vercelai.otel',
         status: 'ok',
       }),
@@ -149,11 +220,11 @@ describe('Vercel AI integration', () => {
           'gen_ai.usage.output_tokens': 20,
           'gen_ai.usage.total_tokens': 30,
           'operation.name': 'ai.generateText',
-          'sentry.op': 'ai.pipeline.generateText',
+          'sentry.op': 'ai.pipeline.generate_text',
           'sentry.origin': 'auto.vercelai.otel',
         },
         description: 'generateText',
-        op: 'ai.pipeline.generateText',
+        op: 'ai.pipeline.generate_text',
         origin: 'auto.vercelai.otel',
         status: 'ok',
       }),
@@ -182,11 +253,11 @@ describe('Vercel AI integration', () => {
           'gen_ai.usage.output_tokens': 20,
           'gen_ai.usage.total_tokens': 30,
           'operation.name': 'ai.generateText.doGenerate',
-          'sentry.op': 'ai.run.doGenerate',
+          'sentry.op': 'gen_ai.generate_text',
           'sentry.origin': 'auto.vercelai.otel',
         },
-        description: 'generateText.doGenerate',
-        op: 'ai.run.doGenerate',
+        description: 'generate_text mock-model-id',
+        op: 'gen_ai.generate_text',
         origin: 'auto.vercelai.otel',
         status: 'ok',
       }),
@@ -209,11 +280,11 @@ describe('Vercel AI integration', () => {
           'gen_ai.usage.output_tokens': 20,
           'gen_ai.usage.total_tokens': 30,
           'operation.name': 'ai.generateText',
-          'sentry.op': 'ai.pipeline.generateText',
+          'sentry.op': 'ai.pipeline.generate_text',
           'sentry.origin': 'auto.vercelai.otel',
         },
         description: 'generateText',
-        op: 'ai.pipeline.generateText',
+        op: 'ai.pipeline.generate_text',
         origin: 'auto.vercelai.otel',
         status: 'ok',
       }),
@@ -221,7 +292,7 @@ describe('Vercel AI integration', () => {
       expect.objectContaining({
         data: {
           'sentry.origin': 'auto.vercelai.otel',
-          'sentry.op': 'ai.run.doGenerate',
+          'sentry.op': 'gen_ai.generate_text',
           'operation.name': 'ai.generateText.doGenerate',
           'ai.operationId': 'ai.generateText.doGenerate',
           'ai.model.provider': 'mock-provider',
@@ -245,8 +316,91 @@ describe('Vercel AI integration', () => {
           'gen_ai.response.model': 'mock-model-id',
           'gen_ai.usage.total_tokens': 30,
         },
-        description: 'generateText.doGenerate',
-        op: 'ai.run.doGenerate',
+        description: 'generate_text mock-model-id',
+        op: 'gen_ai.generate_text',
+        origin: 'auto.vercelai.otel',
+        status: 'ok',
+      }),
+      // Fifth span - tool call generateText span (should include prompts when sendDefaultPii: true)
+      expect.objectContaining({
+        data: {
+          'ai.model.id': 'mock-model-id',
+          'ai.model.provider': 'mock-provider',
+          'ai.operationId': 'ai.generateText',
+          'ai.pipeline.name': 'generateText',
+          'ai.prompt': '{"prompt":"What is the weather in San Francisco?"}',
+          'ai.response.finishReason': 'tool-calls',
+          'ai.response.text': 'Tool call completed!',
+          'ai.response.toolCalls': expect.any(String),
+          'ai.settings.maxRetries': 2,
+          'ai.settings.maxSteps': 1,
+          'ai.streaming': false,
+          'gen_ai.prompt': '{"prompt":"What is the weather in San Francisco?"}',
+          'gen_ai.response.model': 'mock-model-id',
+          'gen_ai.usage.input_tokens': 15,
+          'gen_ai.usage.output_tokens': 25,
+          'gen_ai.usage.total_tokens': 40,
+          'operation.name': 'ai.generateText',
+          'sentry.op': 'ai.pipeline.generate_text',
+          'sentry.origin': 'auto.vercelai.otel',
+        },
+        description: 'generateText',
+        op: 'ai.pipeline.generate_text',
+        origin: 'auto.vercelai.otel',
+        status: 'ok',
+      }),
+      // Sixth span - tool call doGenerate span (should include prompts when sendDefaultPii: true)
+      expect.objectContaining({
+        data: {
+          'ai.model.id': 'mock-model-id',
+          'ai.model.provider': 'mock-provider',
+          'ai.operationId': 'ai.generateText.doGenerate',
+          'ai.pipeline.name': 'generateText.doGenerate',
+          'ai.prompt.format': expect.any(String),
+          'ai.prompt.messages': expect.any(String),
+          'ai.prompt.toolChoice': expect.any(String),
+          'ai.prompt.tools': expect.any(Array),
+          'ai.response.finishReason': 'tool-calls',
+          'ai.response.id': expect.any(String),
+          'ai.response.model': 'mock-model-id',
+          'ai.response.text': 'Tool call completed!',
+          'ai.response.timestamp': expect.any(String),
+          'ai.response.toolCalls': expect.any(String),
+          'ai.settings.maxRetries': 2,
+          'ai.streaming': false,
+          'gen_ai.request.model': 'mock-model-id',
+          'gen_ai.response.finish_reasons': ['tool-calls'],
+          'gen_ai.response.id': expect.any(String),
+          'gen_ai.response.model': 'mock-model-id',
+          'gen_ai.system': 'mock-provider',
+          'gen_ai.usage.input_tokens': 15,
+          'gen_ai.usage.output_tokens': 25,
+          'gen_ai.usage.total_tokens': 40,
+          'operation.name': 'ai.generateText.doGenerate',
+          'sentry.op': 'gen_ai.generate_text',
+          'sentry.origin': 'auto.vercelai.otel',
+        },
+        description: 'generate_text mock-model-id',
+        op: 'gen_ai.generate_text',
+        origin: 'auto.vercelai.otel',
+        status: 'ok',
+      }),
+      // Seventh span - tool call execution span
+      expect.objectContaining({
+        data: {
+          'ai.operationId': 'ai.toolCall',
+          'ai.toolCall.args': expect.any(String),
+          'ai.toolCall.id': 'call-1',
+          'ai.toolCall.name': 'getWeather',
+          'ai.toolCall.result': expect.any(String),
+          'gen_ai.tool.call.id': 'call-1',
+          'gen_ai.tool.name': 'getWeather',
+          'operation.name': 'ai.toolCall',
+          'sentry.op': 'gen_ai.execute_tool',
+          'sentry.origin': 'auto.vercelai.otel',
+        },
+        description: 'execute_tool getWeather',
+        op: 'gen_ai.execute_tool',
         origin: 'auto.vercelai.otel',
         status: 'ok',
       }),
diff --git a/packages/node/src/integrations/tracing/vercelai/attributes.ts b/packages/node/src/integrations/tracing/vercelai/ai_sdk_attributes.ts
similarity index 100%
rename from packages/node/src/integrations/tracing/vercelai/attributes.ts
rename to packages/node/src/integrations/tracing/vercelai/ai_sdk_attributes.ts
diff --git a/packages/node/src/integrations/tracing/vercelai/index.ts b/packages/node/src/integrations/tracing/vercelai/index.ts
index 44bc2dca915f..2c5faf04acef 100644
--- a/packages/node/src/integrations/tracing/vercelai/index.ts
+++ b/packages/node/src/integrations/tracing/vercelai/index.ts
@@ -1,3 +1,4 @@
+/* eslint-disable @typescript-eslint/no-dynamic-delete */
 /* eslint-disable complexity */
 import type { IntegrationFn } from '@sentry/core';
 import { defineIntegration, SEMANTIC_ATTRIBUTE_SENTRY_OP, spanToJSON } from '@sentry/core';
@@ -7,12 +8,15 @@ import {
   AI_MODEL_ID_ATTRIBUTE,
   AI_MODEL_PROVIDER_ATTRIBUTE,
   AI_PROMPT_ATTRIBUTE,
+  AI_TELEMETRY_FUNCTION_ID_ATTRIBUTE,
+  AI_TOOL_CALL_ID_ATTRIBUTE,
+  AI_TOOL_CALL_NAME_ATTRIBUTE,
   AI_USAGE_COMPLETION_TOKENS_ATTRIBUTE,
   AI_USAGE_PROMPT_TOKENS_ATTRIBUTE,
   GEN_AI_RESPONSE_MODEL_ATTRIBUTE,
   GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE,
   GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE,
-} from './attributes';
+} from './ai_sdk_attributes';
 import { INTEGRATION_NAME } from './constants';
 import { SentryVercelAiInstrumentation } from './instrumentation';
 import type { VercelAiOptions } from './types';
@@ -37,81 +41,30 @@ const _vercelAIIntegration = ((options: VercelAiOptions = {}) => {
           return;
         }
 
+        // Tool call spans
+        // https://ai-sdk.dev/docs/ai-sdk-core/telemetry#tool-call-spans
+        if (
+          attributes[AI_TOOL_CALL_NAME_ATTRIBUTE] &&
+          attributes[AI_TOOL_CALL_ID_ATTRIBUTE] &&
+          name === 'ai.toolCall'
+        ) {
+          addOriginToSpan(span, 'auto.vercelai.otel');
+          span.setAttribute(SEMANTIC_ATTRIBUTE_SENTRY_OP, 'gen_ai.execute_tool');
+          span.setAttribute('gen_ai.tool.call.id', attributes[AI_TOOL_CALL_ID_ATTRIBUTE]);
+          span.setAttribute('gen_ai.tool.name', attributes[AI_TOOL_CALL_NAME_ATTRIBUTE]);
+          span.updateName(`execute_tool ${attributes[AI_TOOL_CALL_NAME_ATTRIBUTE]}`);
+          return;
+        }
+
+        // The AI and Provider must be defined for generate, stream, and embed spans.
         // The id of the model
         const aiModelId = attributes[AI_MODEL_ID_ATTRIBUTE];
 
-        // the provider of the model
         const aiModelProvider = attributes[AI_MODEL_PROVIDER_ATTRIBUTE];
-
-        // both of these must be defined for the integration to work
         if (typeof aiModelId !== 'string' || typeof aiModelProvider !== 'string' || !aiModelId || !aiModelProvider) {
           return;
         }
 
-        let isPipelineSpan = false;
-
-        switch (name) {
-          case 'ai.generateText': {
-            span.setAttribute(SEMANTIC_ATTRIBUTE_SENTRY_OP, 'ai.pipeline.generateText');
-            isPipelineSpan = true;
-            break;
-          }
-          case 'ai.generateText.doGenerate': {
-            span.setAttribute(SEMANTIC_ATTRIBUTE_SENTRY_OP, 'ai.run.doGenerate');
-            break;
-          }
-          case 'ai.streamText': {
-            span.setAttribute(SEMANTIC_ATTRIBUTE_SENTRY_OP, 'ai.pipeline.streamText');
-            isPipelineSpan = true;
-            break;
-          }
-          case 'ai.streamText.doStream': {
-            span.setAttribute(SEMANTIC_ATTRIBUTE_SENTRY_OP, 'ai.run.doStream');
-            break;
-          }
-          case 'ai.generateObject': {
-            span.setAttribute(SEMANTIC_ATTRIBUTE_SENTRY_OP, 'ai.pipeline.generateObject');
-            isPipelineSpan = true;
-            break;
-          }
-          case 'ai.generateObject.doGenerate': {
-            span.setAttribute(SEMANTIC_ATTRIBUTE_SENTRY_OP, 'ai.run.doGenerate');
-            break;
-          }
-          case 'ai.streamObject': {
-            span.setAttribute(SEMANTIC_ATTRIBUTE_SENTRY_OP, 'ai.pipeline.streamObject');
-            isPipelineSpan = true;
-            break;
-          }
-          case 'ai.streamObject.doStream': {
-            span.setAttribute(SEMANTIC_ATTRIBUTE_SENTRY_OP, 'ai.run.doStream');
-            break;
-          }
-          case 'ai.embed': {
-            span.setAttribute(SEMANTIC_ATTRIBUTE_SENTRY_OP, 'ai.pipeline.embed');
-            isPipelineSpan = true;
-            break;
-          }
-          case 'ai.embed.doEmbed': {
-            span.setAttribute(SEMANTIC_ATTRIBUTE_SENTRY_OP, 'ai.embeddings');
-            break;
-          }
-          case 'ai.embedMany': {
-            span.setAttribute(SEMANTIC_ATTRIBUTE_SENTRY_OP, 'ai.pipeline.embedMany');
-            isPipelineSpan = true;
-            break;
-          }
-          case 'ai.embedMany.doEmbed': {
-            span.setAttribute(SEMANTIC_ATTRIBUTE_SENTRY_OP, 'ai.embeddings');
-            break;
-          }
-          case 'ai.toolCall':
-          case 'ai.stream.firstChunk':
-          case 'ai.stream.finish':
-            span.setAttribute(SEMANTIC_ATTRIBUTE_SENTRY_OP, 'ai.run');
-            break;
-        }
-
         addOriginToSpan(span, 'auto.vercelai.otel');
 
         const nameWthoutAi = name.replace('ai.', '');
@@ -119,9 +72,9 @@ const _vercelAIIntegration = ((options: VercelAiOptions = {}) => {
         span.updateName(nameWthoutAi);
 
         // If a Telemetry name is set and it is a pipeline span, use that as the operation name
-        const functionId = attributes['ai.telemetry.functionId'];
-        if (functionId && typeof functionId === 'string' && isPipelineSpan) {
-          span.updateName(functionId);
+        const functionId = attributes[AI_TELEMETRY_FUNCTION_ID_ATTRIBUTE];
+        if (functionId && typeof functionId === 'string' && name.split('.').length - 1 === 1) {
+          span.updateName(`${nameWthoutAi} ${functionId}`);
           span.setAttribute('ai.pipeline.name', functionId);
         }
 
@@ -132,6 +85,78 @@ const _vercelAIIntegration = ((options: VercelAiOptions = {}) => {
           span.setAttribute(GEN_AI_RESPONSE_MODEL_ATTRIBUTE, attributes[AI_MODEL_ID_ATTRIBUTE]);
         }
         span.setAttribute('ai.streaming', name.includes('stream'));
+
+        // Generate Spans
+        if (name === 'ai.generateText') {
+          span.setAttribute(SEMANTIC_ATTRIBUTE_SENTRY_OP, 'ai.pipeline.generate_text');
+          return;
+        }
+
+        if (name === 'ai.generateText.doGenerate') {
+          span.setAttribute(SEMANTIC_ATTRIBUTE_SENTRY_OP, 'gen_ai.generate_text');
+          span.updateName(`generate_text ${attributes[AI_MODEL_ID_ATTRIBUTE]}`);
+          return;
+        }
+
+        if (name === 'ai.streamText') {
+          span.setAttribute(SEMANTIC_ATTRIBUTE_SENTRY_OP, 'ai.pipeline.stream_text');
+          return;
+        }
+
+        if (name === 'ai.streamText.doStream') {
+          span.setAttribute(SEMANTIC_ATTRIBUTE_SENTRY_OP, 'gen_ai.stream_text');
+          span.updateName(`stream_text ${attributes[AI_MODEL_ID_ATTRIBUTE]}`);
+          return;
+        }
+
+        if (name === 'ai.generateObject') {
+          span.setAttribute(SEMANTIC_ATTRIBUTE_SENTRY_OP, 'ai.pipeline.generate_object');
+          return;
+        }
+
+        if (name === 'ai.generateObject.doGenerate') {
+          span.setAttribute(SEMANTIC_ATTRIBUTE_SENTRY_OP, 'gen_ai.generate_object');
+          span.updateName(`generate_object ${attributes[AI_MODEL_ID_ATTRIBUTE]}`);
+          return;
+        }
+
+        if (name === 'ai.streamObject') {
+          span.setAttribute(SEMANTIC_ATTRIBUTE_SENTRY_OP, 'ai.pipeline.stream_object');
+          return;
+        }
+
+        if (name === 'ai.streamObject.doStream') {
+          span.setAttribute(SEMANTIC_ATTRIBUTE_SENTRY_OP, 'gen_ai.stream_object');
+          span.updateName(`stream_object ${attributes[AI_MODEL_ID_ATTRIBUTE]}`);
+          return;
+        }
+
+        if (name === 'ai.embed') {
+          span.setAttribute(SEMANTIC_ATTRIBUTE_SENTRY_OP, 'ai.pipeline.embed');
+          return;
+        }
+
+        if (name === 'ai.embed.doEmbed') {
+          span.setAttribute(SEMANTIC_ATTRIBUTE_SENTRY_OP, 'gen_ai.embed');
+          span.updateName(`embed ${attributes[AI_MODEL_ID_ATTRIBUTE]}`);
+          return;
+        }
+
+        if (name === 'ai.embedMany') {
+          span.setAttribute(SEMANTIC_ATTRIBUTE_SENTRY_OP, 'ai.pipeline.embed_many');
+          return;
+        }
+
+        if (name === 'ai.embedMany.doEmbed') {
+          span.setAttribute(SEMANTIC_ATTRIBUTE_SENTRY_OP, 'gen_ai.embed_many');
+          span.updateName(`embed_many ${attributes[AI_MODEL_ID_ATTRIBUTE]}`);
+          return;
+        }
+
+        if (name.startsWith('ai.stream')) {
+          span.setAttribute(SEMANTIC_ATTRIBUTE_SENTRY_OP, 'ai.run');
+          return;
+        }
       });
 
       client.addEventProcessor(event => {
@@ -145,12 +170,10 @@ const _vercelAIIntegration = ((options: VercelAiOptions = {}) => {
 
           if (attributes[AI_USAGE_COMPLETION_TOKENS_ATTRIBUTE] != undefined) {
             attributes[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE] = attributes[AI_USAGE_COMPLETION_TOKENS_ATTRIBUTE];
-            // eslint-disable-next-line @typescript-eslint/no-dynamic-delete
             delete attributes[AI_USAGE_COMPLETION_TOKENS_ATTRIBUTE];
           }
           if (attributes[AI_USAGE_PROMPT_TOKENS_ATTRIBUTE] != undefined) {
             attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE] = attributes[AI_USAGE_PROMPT_TOKENS_ATTRIBUTE];
-            // eslint-disable-next-line @typescript-eslint/no-dynamic-delete
             delete attributes[AI_USAGE_PROMPT_TOKENS_ATTRIBUTE];
           }
           if (