Skip to content

Commit

Permalink
fix(langchain): do not throw when result is undefined (#4933)
Browse files Browse the repository at this point in the history
* do not throw when result is undefined

* remove `expect` require
  • Loading branch information
sabrenner authored Nov 22, 2024
1 parent d058f42 commit d3b9b3a
Show file tree
Hide file tree
Showing 3 changed files with 110 additions and 2 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -46,7 +46,7 @@ class LangChainChatModelHandler extends LangChainLanguageModelHandler {

this.extractTokenMetrics(ctx.currentStore?.span, result)

for (const messageSetIdx in result.generations) {
for (const messageSetIdx in result?.generations) {
const messageSet = result.generations[messageSetIdx]

for (const chatCompletionIdx in messageSet) {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -37,7 +37,7 @@ class LangChainLLMHandler extends LangChainLanguageModelHandler {

this.extractTokenMetrics(ctx.currentStore?.span, result)

for (const completionIdx in result.generations) {
for (const completionIdx in result?.generations) {
const completion = result.generations[completionIdx]
if (this.isPromptCompletionSampled()) {
tags[`langchain.response.completions.${completionIdx}.text`] = this.normalize(completion[0].text) || ''
Expand Down
108 changes: 108 additions & 0 deletions packages/datadog-plugin-langchain/test/index.spec.js
Original file line number Diff line number Diff line change
Expand Up @@ -137,6 +137,33 @@ describe('Plugin', () => {
})

describe('llm', () => {
it('does not tag output on error', async () => {
  nock('https://api.openai.com').post('/v1/completions').reply(403)

  const tracesPromise = agent
    .use(traces => {
      expect(traces[0].length).to.equal(1)

      const span = traces[0][0]

      // On an errored request, no completion output tags should be written.
      const completionTagPattern = /^langchain\.response\.completions\./
      const taggedCompletion = Object.keys(span.meta).some(key => completionTagPattern.test(key))
      expect(taggedCompletion).to.be.false

      // The error itself is still recorded on the span.
      expect(span.meta).to.have.property('error.message')
      expect(span.meta).to.have.property('error.type')
      expect(span.meta).to.have.property('error.stack')
    })

  try {
    const llm = new langchainOpenai.OpenAI({ model: 'gpt-3.5-turbo-instruct', maxRetries: 0 })
    await llm.generate(['what is 2 + 2?'])
  } catch {} // the 403 is expected to reject; assertions run against the span instead

  await tracesPromise
})

it('instruments a langchain llm call for a single prompt', async () => {
stubCall({
...openAiBaseCompletionInfo,
Expand Down Expand Up @@ -270,6 +297,32 @@ describe('Plugin', () => {
})

describe('chat model', () => {
it('does not tag output on error', async () => {
  nock('https://api.openai.com').post('/v1/chat/completions').reply(403)

  const tracesPromise = agent
    .use(traces => {
      expect(traces[0].length).to.equal(1)

      const span = traces[0][0]

      // On an errored request, no completion output tags should be written.
      const completionTagPattern = /^langchain\.response\.completions\./
      const taggedCompletion = Object.keys(span.meta).some(key => completionTagPattern.test(key))
      expect(taggedCompletion).to.be.false

      // The error itself is still recorded on the span.
      expect(span.meta).to.have.property('error.message')
      expect(span.meta).to.have.property('error.type')
      expect(span.meta).to.have.property('error.stack')
    })

  try {
    const chatModel = new langchainOpenai.ChatOpenAI({ model: 'gpt-4', maxRetries: 0 })
    await chatModel.invoke('Hello!')
  } catch {} // the 403 is expected to reject; assertions run against the span instead

  await tracesPromise
})

it('instruments a langchain openai chat model call for a single string prompt', async () => {
stubCall({
...openAiBaseChatInfo,
Expand Down Expand Up @@ -546,6 +599,37 @@ describe('Plugin', () => {
})

describe('chain', () => {
it('does not tag output on error', async () => {
  nock('https://api.openai.com').post('/v1/chat/completions').reply(403)

  const tracesPromise = agent
    .use(traces => {
      // A chain run produces two spans: the chain span plus the inner model span.
      expect(traces[0].length).to.equal(2)

      const chainSpan = traces[0][0]

      // On an errored run, no chain output tags should be written.
      const outputTagPattern = /^langchain\.response\.outputs\./
      const taggedOutput = Object.keys(chainSpan.meta).some(key => outputTagPattern.test(key))
      expect(taggedOutput).to.be.false

      // The error itself is still recorded on the chain span.
      expect(chainSpan.meta).to.have.property('error.message')
      expect(chainSpan.meta).to.have.property('error.type')
      expect(chainSpan.meta).to.have.property('error.stack')
    })

  try {
    const model = new langchainOpenai.ChatOpenAI({ model: 'gpt-4', maxRetries: 0 })
    const parser = new langchainOutputParsers.StringOutputParser()

    const chain = model.pipe(parser)

    await chain.invoke('Hello!')
  } catch {} // the 403 is expected to reject; assertions run against the span instead

  await tracesPromise
})

it('instruments a langchain chain with a single openai chat model call', async () => {
stubCall({
...openAiBaseChatInfo,
Expand Down Expand Up @@ -790,6 +874,30 @@ describe('Plugin', () => {

describe('embeddings', () => {
describe('@langchain/openai', () => {
it('does not tag output on error', async () => {
  nock('https://api.openai.com').post('/v1/embeddings').reply(403)

  const tracesPromise = agent
    .use(traces => {
      expect(traces[0].length).to.equal(1)

      const span = traces[0][0]

      // On an errored request, the embedding output tag should be absent.
      expect(span.meta).to.not.have.property('langchain.response.outputs.embedding_length')

      // The error itself is still recorded on the span.
      expect(span.meta).to.have.property('error.message')
      expect(span.meta).to.have.property('error.type')
      expect(span.meta).to.have.property('error.stack')
    })

  try {
    const embeddings = new langchainOpenai.OpenAIEmbeddings()
    await embeddings.embedQuery('Hello, world!')
  } catch {} // the 403 is expected to reject; assertions run against the span instead

  await tracesPromise
})

it('instruments a langchain openai embedQuery call', async () => {
stubCall({
...openAiBaseEmbeddingInfo,
Expand Down

0 comments on commit d3b9b3a

Please sign in to comment.