Compute
Deploy long-running LangChain agents in Chalk containers.
LangChain agents that call tools, browse the web, or execute code benefit from running in an isolated container with dedicated compute. Chalk Compute gives each agent its own environment with network access, persistent storage, and a public URL for webhooks or health checks.
This tutorial deploys a LangChain ReAct agent that uses tool-calling to answer research questions.
Create agent.py — a self-contained LangChain agent served over HTTP with FastAPI:
# agent.py
from fastapi import FastAPI
from langchain_openai import ChatOpenAI
from langchain.agents import AgentExecutor, create_react_agent
from langchain_core.prompts import PromptTemplate
from langchain_community.tools import DuckDuckGoSearchRun
# HTTP front end for the agent.
app = FastAPI()

# temperature=0 keeps the model's tool-calling behavior deterministic.
llm = ChatOpenAI(model="gpt-4o", temperature=0)

# Web search is the only tool this agent can call.
tools = [DuckDuckGoSearchRun()]

# Minimal ReAct prompt. create_react_agent substitutes {tools}, {tool_names},
# and {agent_scratchpad}; {input} carries the user's question.
prompt = PromptTemplate.from_template(
    "Answer the following question using the tools available to you.\n\n"
    "Tools: {tools}\nTool names: {tool_names}\n\n"
    "Question: {input}\n{agent_scratchpad}"
)

agent = create_react_agent(llm, tools, prompt)
# verbose=True logs each reasoning/tool step to the container's stdout.
executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
@app.get("/ask")
def ask(q: str) -> dict:
    """Run the ReAct agent on question *q* and return its final answer."""
    outcome = executor.invoke({"input": q})
    return {"answer": outcome["output"]}
@app.get("/health")
def health() -> dict:
return {"status": "ok"}Create deploy_langchain.py:
from chalkcompute import Container, Image
# Build the container image: slim Python base, the agent's pip dependencies,
# and agent.py copied into /app.
image = (
    Image.base("python:3.12-slim")
    .pip_install([
        "fastapi",
        "uvicorn",
        "langchain",
        "langchain-openai",
        "langchain-community",
        "duckduckgo-search",  # search backend used by DuckDuckGoSearchRun
    ])
    .add_local_file("agent.py", "/app/agent.py")
)
# Launch the container: uvicorn serves agent.py's FastAPI app on port 8000.
container = Container(
    image=image,
    name="langchain-agent",
    # NOTE(review): "sk-..." is a placeholder — supply the real key through a
    # secrets mechanism rather than hard-coding it in source.
    env={"OPENAI_API_KEY": "sk-..."},
    port=8000,  # must match the --port passed to uvicorn below
    entrypoint=[
        "uvicorn", "agent:app",
        "--host", "0.0.0.0",
        "--port", "8000",
    ],
).run()

# info.web_url is the container's public URL.
print(f"Agent URL: {container.info.web_url}")
print(f"Try: curl '{container.info.web_url}/ask?q=What+is+Chalk'")

Run the deployment:

chalk compute deploy deploy_langchain.py
# ✓ Container created successfully
# Container ID: 7b2e91fa-04ac-4e7f-a912-3df1c8bae205
# Name: langchain-agent
# Status: Running
# Pod Name: chalk-container-langchain-agent
# URL: https://7b2e91fa-04ac-4e7f-a912-3df1c8bae205.compute.chalk.ai

Once the container is running, query the agent:
curl 'https://7b2e91fa-04ac-4e7f-a912-3df1c8bae205.compute.chalk.ai/ask?q=What+is+the+capital+of+France'
# {"answer": "The capital of France is Paris."}

LangChain agents can persist conversation history or vector store data across restarts
using a Volume:
from chalkcompute import Container, Image, Volume
# Create a named volume for the agent's memory; its contents persist across
# container restarts.
vol = Volume(name="agent-memory")

# Same image as before, plus chromadb for a local vector store.
image = (
    Image.base("python:3.12-slim")
    .pip_install([
        "fastapi",
        "uvicorn",
        "langchain",
        "langchain-openai",
        "langchain-community",
        "duckduckgo-search",
        "chromadb",
    ])
    .add_local_file("agent.py", "/app/agent.py")
)
container = Container(
image=image,
name="langchain-agent-persistent",
env={"OPENAI_API_KEY": "sk-..."},
port=8000,
volumes={"agent-memory": "/app/memory"},
entrypoint=[
"uvicorn", "agent:app",
"--host", "0.0.0.0",
"--port", "8000",
],
).run()The volume at /app/memory survives container restarts — point ChromaDB or any
local vector store at that path to retain knowledge across sessions.
For workloads that need to handle many concurrent requests, run multiple agent containers behind a shared volume:
for i in range(3):
Container(
image=image,
name=f"langchain-agent-{i}",
env={"OPENAI_API_KEY": "sk-..."},
port=8000,
volumes={"agent-memory": "/app/memory"},
entrypoint=[
"uvicorn", "agent:app",
"--host", "0.0.0.0",
"--port", "8000",
],
).run()This example builds a LangChain agent tool that receives a financial transaction, enriches it with features from Chalk, runs a PyTorch risk model, and escalates high-risk transactions to a Kinesis review queue.
Assume your Chalk project defines features like these:
from chalk.features import features, Features
@features
class Transaction:
id: str
merchant_id: str
amount: float
merchant_category: str
merchant_risk_tier: int
customer_avg_spend_30d: float
customer_transaction_count_7d: int
country_code: strCreate risk_tool.py — a LangChain tool that queries Chalk, scores the transaction,
and posts flagged results to Kinesis:
# risk_tool.py
import json
import boto3
import torch
import torch.nn as nn
from chalkpy import ChalkClient
from langchain_core.tools import tool
# Transactions scoring above this probability are escalated for human review.
RISK_THRESHOLD = 0.85
KINESIS_STREAM = "transaction-review-queue"

# NOTE(review): ChalkClient() presumably picks up CHALK_CLIENT_ID/SECRET from
# the environment set at deploy time — verify against the deployment script.
chalk = ChalkClient()
kinesis = boto3.client("kinesis", region_name="us-east-1")
class RiskModel(nn.Module):
    """Small feed-forward scorer: 4 input features -> sigmoid risk in [0, 1].

    Do not rename `net` or change the layer structure — load_state_dict below
    expects these exact state-dict keys (net.0.weight, net.2.weight, ...).
    """

    def __init__(self) -> None:
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(4, 32),
            nn.ReLU(),
            nn.Linear(32, 1),
            nn.Sigmoid(),
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # x: (batch, 4) -> (batch, 1) score in [0, 1] (Sigmoid output).
        return self.net(x)
# Load pre-trained weights from the mounted "risk-models" volume
# (mounted at /app/models by the deployment script below).
# weights_only=True restricts torch.load to tensor data — no arbitrary pickle.
model = RiskModel()
model.load_state_dict(torch.load("/app/models/risk_model.pt", weights_only=True))
model.eval()  # inference mode
@tool
def score_transaction(transaction_id: str) -> str:
    """Score a financial transaction for fraud risk.

    Retrieves enriched features from Chalk, runs a risk model,
    and escalates to a review queue if the score exceeds the threshold.
    """
    # 1. Query Chalk for enriched transaction features
    result = chalk.query(
        input={"transaction.id": transaction_id},
        output=[
            "transaction.amount",
            "transaction.merchant_risk_tier",
            "transaction.customer_avg_spend_30d",
            "transaction.customer_transaction_count_7d",
            "transaction.merchant_category",
            "transaction.country_code",
        ],
    )
    amount = result.get_feature_value("transaction.amount")
    merchant_risk_tier = result.get_feature_value("transaction.merchant_risk_tier")
    avg_spend = result.get_feature_value("transaction.customer_avg_spend_30d")
    txn_count = result.get_feature_value("transaction.customer_transaction_count_7d")
    merchant_category = result.get_feature_value("transaction.merchant_category")
    country = result.get_feature_value("transaction.country_code")

    # 2. Run the risk model on a 1x4 tensor (RiskModel takes 4 inputs).
    # NOTE(review): feature order must match what the model was trained on —
    # verify against the training pipeline.
    features = torch.tensor([[
        amount / max(avg_spend, 1.0),  # spend ratio; max(..., 1.0) avoids div-by-zero
        float(merchant_risk_tier),
        float(txn_count),
        amount,
    ]])
    with torch.no_grad():  # inference only — no gradients needed
        risk_score = model(features).item()

    # 3. Escalate if above threshold
    if risk_score > RISK_THRESHOLD:
        # Partitioning by transaction id routes records for the same
        # transaction to the same Kinesis shard.
        kinesis.put_record(
            StreamName=KINESIS_STREAM,
            Data=json.dumps({
                "transaction_id": transaction_id,
                "risk_score": round(risk_score, 4),
                "amount": amount,
                "merchant_category": merchant_category,
                "country": country,
                "reason": "automated_risk_score_exceeded",
            }),
            PartitionKey=transaction_id,
        )
        return (
            f"Transaction {transaction_id}: risk score {risk_score:.2%} "
            f"EXCEEDS threshold. Escalated to review queue."
        )
    return (
        f"Transaction {transaction_id}: risk score {risk_score:.2%}. "
        f"Below threshold — no action required."
    )
# agent.py
from fastapi import FastAPI
from langchain_openai import ChatOpenAI
from langchain.agents import AgentExecutor, create_react_agent
from langchain_core.prompts import PromptTemplate
from risk_tool import score_transaction
app = FastAPI()
llm = ChatOpenAI(model="gpt-4o", temperature=0)

# The Chalk-backed scoring tool is this agent's only tool.
tools = [score_transaction]

# ReAct prompt specialized for fraud review; placeholders are filled in by
# create_react_agent at run time.
prompt = PromptTemplate.from_template(
    "You are a fraud analyst assistant. Use the score_transaction tool "
    "to evaluate transactions when asked.\n\n"
    "Tools: {tools}\nTool names: {tool_names}\n\n"
    "Question: {input}\n{agent_scratchpad}"
)
agent = create_react_agent(llm, tools, prompt)
executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
@app.get("/review")
def review(transaction_id: str) -> dict:
    """Ask the agent to score one transaction and return its verdict."""
    result = executor.invoke({
        "input": f"Score transaction {transaction_id} for fraud risk."
    })
    return {"result": result["output"]}

from chalkcompute import Container, Image, Volume
# Upload the trained weights into a named volume the container will mount.
vol = Volume("risk-models")
# Use a context manager so the file handle is closed deterministically
# (the original open(...).read() leaked the handle).
with open("risk_model.pt", "rb") as f:
    vol.put_file("risk_model.pt", f.read())
# Image for the fraud-review agent: both modules plus chalkpy/torch/boto3.
image = (
    # NOTE(review): earlier examples use Image.base("python:3.12-slim") —
    # confirm both constructors exist, or make the examples consistent.
    Image.debian_slim("3.12")
    .pip_install([
        "fastapi",
        "uvicorn",
        "langchain",
        "langchain-openai",
        "chalkpy",
        "torch",
        "boto3",
    ])
    .add_local_file("agent.py", "/app/agent.py")
    .add_local_file("risk_tool.py", "/app/risk_tool.py")
)
container = Container(
    image=image,
    name="fraud-review-agent",
    # NOTE(review): placeholders — inject real credentials via a secrets
    # mechanism, not literal values in source.
    env={
        "OPENAI_API_KEY": "sk-...",
        "CHALK_CLIENT_ID": "...",
        "CHALK_CLIENT_SECRET": "...",
    },
    port=8000,
    # Mount the weights volume where risk_tool.py loads the model from.
    volumes={"risk-models": "/app/models"},
    entrypoint=[
        "uvicorn", "agent:app",
        "--host", "0.0.0.0",
        "--port", "8000",
    ],
).run()
print(f"Agent URL: {container.info.web_url}")

Query the agent:
curl 'https://7b2e91fa-04ac-4e7f-a912-3df1c8bae205.compute.chalk.ai/review?transaction_id=txn_8a3f2c'
# {"result": "Transaction txn_8a3f2c: risk score 92.31% EXCEEDS threshold. Escalated to review queue."}