from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from agentbasis.frameworks.langchain import get_callback_config
# get_callback_config() is a convenient shortcut that wires the AgentBasis
# callback handler into a LangChain run via the standard `config` mapping.
run_config = get_callback_config()

# NOTE(review): `llm` is assumed to be defined earlier in the file — confirm.
prompt = PromptTemplate.from_template("Answer this: {query}")
chain = LLMChain(llm=llm, prompt=prompt)

# The resulting trace shows the Chain execution wrapping the inner LLM call.
result = chain.invoke(
    {"query": "What is the capital of France?"},
    config=run_config,
)