/**
 * Run the full content-processing workflow under a single Basalt trace:
 * 1) topic analysis, 2) parallel enhancement + translation, 3) summarization.
 *
 * @param {object} user - Requesting user; id/name/email plus organizationId/
 *   organizationName are attached to the trace for attribution.
 * @param {string} content - Raw article text to process.
 * @returns {Promise<{topics: string, enhancedContent: string, translatedContent: string, summary: string}>}
 * @throws Re-throws any prompt-fetch or downstream error after recording it on the trace.
 */
async function processUserContent(user, content) {
  // Root trace covering the whole workflow; all logs/generations nest under it.
  const mainTrace = basalt.monitor.createTrace('content-workflow', {
    name: 'Content Processing Workflow',
    input: content,
    user: {
      id: user.id,
      name: user.name,
      email: user.email
    },
    organization: {
      id: user.organizationId,
      name: user.organizationName
    },
    metadata: {
      contentType: 'article',
      contentLength: content.length,
      requestTimestamp: new Date().toISOString()
    }
  })
  try {
    // Step 1: Content analysis (topic extraction).
    const analysisLog = mainTrace.createLog({
      name: 'content-analysis',
      type: 'span',
      input: content
    })
    // Fetch the managed prompt from Basalt; it also returns a generation
    // object that ties the upcoming LLM call back to this trace.
    const { value: topicPrompt, error: promptError, generation: topicGeneration } =
      await basalt.prompt.get('topic-extractor', {
        variables: {
          content: content
        }
      })
    if (promptError) {
      throw promptError
    }
    // Attach the generation to the analysis span before the LLM call.
    analysisLog.append(topicGeneration)
    // Call your LLM provider.
    const topics = await yourLLMProvider.complete(topicPrompt.text)
    // Record the generation result.
    topicGeneration.end(topics)
    // Split once and reuse — the original re-parsed the list for each field.
    const topicList = topics.split(',')
    analysisLog.update({
      metadata: {
        topicsIdentified: topicList.length,
        mainTopic: topicList[0]
      }
    })
    analysisLog.end(`Identified topics: ${topics}`)
    // Step 2: Enhancement and translation run concurrently under one span.
    const parallelLog = mainTrace.createLog({
      name: 'parallel-processing',
      type: 'span',
      input: content,
      metadata: {
        operations: ['enhancement', 'translation']
      }
    })
    // Both calls start before any await, so they genuinely run in parallel.
    const [enhancedContent, translatedContent] = await Promise.all([
      enhanceContent(content, parallelLog),
      translateContent(content, parallelLog)
    ])
    parallelLog.end()
    // Step 3: Summarization of the enhanced content.
    const summaryGeneration = mainTrace.createGeneration({
      name: 'content-summarization',
      prompt: {
        slug: 'summarizer'
      },
      input: enhancedContent,
      variables: {
        length: 'short',
        style: 'professional'
      },
      metadata: {
        originalContentLength: content.length,
        enhancedContentLength: enhancedContent.length
      }
    })
    // Generate the summary.
    const summary = await yourLLMProvider.complete({
      prompt: `Summarize the following content: ${enhancedContent}`,
      model: 'gpt-4o'
    })
    // Record the summary with explicit token counts and cost.
    summaryGeneration.end({
      output: summary,
      inputTokens: calculateTokens(enhancedContent),
      outputTokens: calculateTokens(summary),
      cost: 0.02 // Example cost in USD
    })
    // Annotate the trace with final workflow stats.
    mainTrace.update({
      metadata: {
        processingSteps: 3,
        // Date.now() avoids allocating two throwaway Date objects.
        // NOTE(review): assumes the SDK exposes mainTrace.startTime — confirm.
        totalProcessingTime: Date.now() - new Date(mainTrace.startTime).getTime(),
        enhancedContentAvailable: true,
        translationAvailable: true,
        summaryAvailable: true
      }
    })
    // End the trace with the final output.
    mainTrace.end(summary)
    return {
      topics,
      enhancedContent,
      translatedContent,
      summary
    }
  } catch (error) {
    // Record the failure. Optional chaining guards against non-Error throws —
    // promptError above is of unknown shape and may lack name/message/stack.
    mainTrace.update({
      metadata: {
        error: {
          name: error?.name,
          message: error?.message,
          stack: error?.stack
        },
        status: 'failed'
      }
    })
    // Always end the trace, even in error cases.
    mainTrace.end(`Error: ${error?.message ?? String(error)}`)
    throw error
  }
}
// Helper functions for the content processing
/**
 * Enhance a piece of content via the LLM, recorded as a child span
 * (with a nested generation) under the supplied parent log.
 *
 * @param {string} content - Text to enhance.
 * @param {object} parentLog - Basalt log the enhancement span nests under.
 * @returns {Promise<string>} The enhanced content.
 */
async function enhanceContent(content, parentLog) {
  // Dedicated span for this operation.
  const span = parentLog.createLog({
    name: 'content-enhancement',
    type: 'span',
    input: content
  })
  // Generation tied to the 'content-enhancer' prompt slug.
  const gen = span.createGeneration({
    name: 'enhance-text',
    prompt: {
      slug: 'content-enhancer'
    },
    input: content
  })
  // Call your LLM provider.
  const result = await yourLLMProvider.complete({
    prompt: `Enhance the following content: ${content}`,
    model: 'gpt-4o'
  })
  // Close generation then span with the enhanced text.
  gen.end(result)
  span.end(result)
  return result
}
/**
 * Translate a piece of content to Spanish via the LLM, recorded as a
 * child span (with a nested generation) under the supplied parent log.
 *
 * @param {string} content - Text to translate.
 * @param {object} parentLog - Basalt log the translation span nests under.
 * @returns {Promise<string>} The translated content.
 */
async function translateContent(content, parentLog) {
  // Dedicated span for this operation.
  const span = parentLog.createLog({
    name: 'content-translation',
    type: 'span',
    input: content
  })
  // Generation tied to the 'translator' prompt slug.
  const gen = span.createGeneration({
    name: 'translate-text',
    prompt: {
      slug: 'translator'
    },
    input: content,
    variables: {
      targetLanguage: 'Spanish'
    }
  })
  // Call your LLM provider.
  const result = await yourLLMProvider.complete({
    prompt: `Translate the following content to Spanish: ${content}`,
    model: 'gpt-4o'
  })
  // Let Basalt calculate tokens and cost automatically.
  gen.end(result)
  span.end(result)
  return result
}