Usage

Lisette usage and cost monitoring
import time, litellm, importlib, httpx
from lisette.core import Chat, AsyncChat, patch_litellm
from cachy import enable_cachy, disable_cachy
from fastcore.utils import *  # patch, ifnone, dict2obj, L used below
from fastcore.test import *

Lisette Usage Logger

importlib.reload(litellm); # to re-run the notebook without kernel restart
# litellm._turn_on_debug()
patch_litellm()

The precomputed response cost is available in kwargs['response_cost'], according to the litellm docs:
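A minimal sketch of that mechanism, assuming litellm's documented CustomLogger interface (LisetteUsageLogger itself adds the database write and user attribution shown below):

from litellm.integrations.custom_logger import CustomLogger

class CostPrinter(CustomLogger):
    "Hypothetical callback that just prints litellm's precomputed cost"
    def log_success_event(self, kwargs, response_obj, start_time, end_time):
        print(kwargs.get('response_cost'))
    async def async_log_success_event(self, kwargs, response_obj, start_time, end_time):
        print(kwargs.get('response_cost'))

# litellm.callbacks = [CostPrinter()]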


source

LisetteUsageLogger


def LisetteUsageLogger(
    db_path
):

Args:

  • turn_off_message_logging: bool - if True, message logging is turned off; the message and response are redacted from StandardLoggingPayload.
  • message_logging: bool - deprecated, use turn_off_message_logging instead.

Cost Utils

class PrefixDict(dict):
    "dict where a missing key matches any stored key that prefixes it, e.g. 'claude-sonnet-4-5-20250929' -> 'claude-sonnet-4-5'"
    def __getitem__(self, key):
        if key in self.keys(): return super().__getitem__(key)
        for k in self.keys(): 
            if key.startswith(k): return super().__getitem__(k)
        raise KeyError(key)
model_prices = PrefixDict({'claude-sonnet-4-5':
    dict(input_prc = 3/1e6, cache_write_prc = 3.75/1e6, cache_read_prc = 0.3/1e6, output_prc = 15/1e6, web_search_prc = 10/1e3)
})
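Thanks to the prefix fallback, the full dated model names used in the calls below resolve to the same pricing entry:

test_eq(model_prices['claude-sonnet-4-5-20250929'], model_prices['claude-sonnet-4-5'])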

Simplified cost utils to demonstrate total cost calculation (use Usage.response_cost in prod):

@patch(as_prop=True)
def inp_cost(self:UsageStats):         return model_prices[self.model]['input_prc'] * (self.prompt_tokens - self.cache_read_tokens)
@patch(as_prop=True)
def cache_write_cost(self:UsageStats): return model_prices[self.model]['cache_write_prc'] * self.cache_creation_tokens
@patch(as_prop=True)
def cache_read_cost(self:UsageStats):  return model_prices[self.model]['cache_read_prc'] * self.cache_read_tokens
@patch(as_prop=True)
def out_cost(self:UsageStats):         return model_prices[self.model]['output_prc'] * self.completion_tokens
@patch(as_prop=True)
def web_cost(self:UsageStats):         return model_prices[self.model]['web_search_prc'] * ifnone(self.web_search_requests, 0)
@patch(as_prop=True)
def cost(self:UsageStats):             return self.inp_cost + self.cache_write_cost + self.cache_read_cost + self.out_cost + self.web_cost
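As a quick sanity check, the first example below (14 uncached prompt tokens, 11 completion tokens, no caching or web search) works out to:

test_close(14 * 3/1e6 + 11 * 15/1e6, 0.000207)  # matches the logged response_cost below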

A mapping of model pricing is also available in litellm; it is what litellm uses to calculate response_cost.

model_pricing = dict2obj(httpx.get(litellm.model_cost_map_url).json())
# model_pricing['claude-sonnet-4-5']
# model_pricing['gemini-3-pro-preview']
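The per-token fields in that map are what litellm multiplies by the token counts; the key names below reflect litellm's schema and are illustrative, as they can change between versions:

# mp = model_pricing['claude-sonnet-4-5']
# mp.input_cost_per_token, mp.output_cost_per_token  # per-token prices used for response_cost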

Examples

from tempfile import NamedTemporaryFile
@patch
def user_id_fn(self:LisetteUsageLogger): return 'user-123'  # attribute all logged usage to a fixed user id for this demo
tf = NamedTemporaryFile(suffix='.db')
logger = LisetteUsageLogger(tf.name)
litellm.callbacks = [logger]
slc = ','.join('id model user_id prompt_tokens completion_tokens total_tokens cached_tokens cache_creation_tokens cache_read_tokens web_search_requests response_cost'.split())
# litellm.set_verbose = True

A simple example:

chat = Chat('claude-sonnet-4-5-20250929')
r = chat("What is 2+2?")
time.sleep(0.1) # wait for the logging callback to write to the db
u = logger.usage(select=slc)[-1]; u
Usage(id=1, timestamp=UNSET, model='claude-sonnet-4-5-20250929', user_id='user-123', prompt_tokens=14, completion_tokens=11, total_tokens=25, cached_tokens=0, cache_creation_tokens=0, cache_read_tokens=0, web_search_requests=0, response_cost=0.000207)

Our calculated cost matches litellm’s response_cost. In some cases it can be better to use the custom calculation, as we’ll see in the remainder of this notebook:

test_eq(u.cost, u.response_cost)

Now, let’s test with streaming:

chat = Chat('claude-sonnet-4-5')
res = chat("Count from 1 to 5", stream=True)
for o in res: pass
time.sleep(0.1)
u = logger.usage(select=slc)[-1]; u
Usage(id=2, timestamp=UNSET, model='claude-sonnet-4-5', user_id='user-123', prompt_tokens=15, completion_tokens=17, total_tokens=32, cached_tokens=0, cache_creation_tokens=0, cache_read_tokens=0, web_search_requests=0, response_cost=0.00030000000000000003)
test_eq(u.cost, u.response_cost)

Streaming was logged successfully. Let’s also verify that async chat calls are logged properly.

chat_async = AsyncChat('claude-sonnet-4-5-20250929')
await chat_async("What is 3+3?")

3 + 3 = 6

  • id: chatcmpl-xxx
  • model: claude-sonnet-4-5-20250929
  • finish_reason: stop
  • usage: Usage(completion_tokens=13, prompt_tokens=14, total_tokens=27, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=None, audio_tokens=None, reasoning_tokens=0, rejected_prediction_tokens=None, text_tokens=13, image_tokens=None), prompt_tokens_details=PromptTokensDetailsWrapper(audio_tokens=None, cached_tokens=0, text_tokens=None, image_tokens=None, cache_creation_tokens=0, cache_creation_token_details=CacheCreationTokenDetails(ephemeral_5m_input_tokens=0, ephemeral_1h_input_tokens=0)), cache_creation_input_tokens=0, cache_read_input_tokens=0, inference_geo='not_available', speed=None)
time.sleep(0.1)
u = logger.usage(select=slc)[-1]; u
Usage(id=3, timestamp=UNSET, model='claude-sonnet-4-5-20250929', user_id='user-123', prompt_tokens=14, completion_tokens=13, total_tokens=27, cached_tokens=0, cache_creation_tokens=0, cache_read_tokens=0, web_search_requests=0, response_cost=0.00023700000000000001)
test_eq(u.cost, u.response_cost)

Finally, let’s test async streaming to ensure all API patterns are covered.

res = await chat_async("Count from 10 to 15", stream=True)
async for o in res: pass
print(o)
ModelResponse(id='chatcmpl-xxx', created=1000000000, model='claude-sonnet-4-5-20250929', object='chat.completion', system_fingerprint=None, choices=[Choices(finish_reason='stop', index=0, message=Message(content='10, 11, 12, 13, 14, 15', role='assistant', tool_calls=None, function_call=None, provider_specific_fields=None))], usage=Usage(completion_tokens=20, prompt_tokens=38, total_tokens=58, completion_tokens_details=CompletionTokensDetailsWrapper(accepted_prediction_tokens=None, audio_tokens=None, reasoning_tokens=0, rejected_prediction_tokens=None, text_tokens=None, image_tokens=None), prompt_tokens_details=None))
time.sleep(0.1)
u = logger.usage(select=slc)[-1]; u
Usage(id=4, timestamp=UNSET, model='claude-sonnet-4-5-20250929', user_id='user-123', prompt_tokens=38, completion_tokens=20, total_tokens=58, cached_tokens=0, cache_creation_tokens=0, cache_read_tokens=0, web_search_requests=0, response_cost=0.00041400000000000003)
test_eq(u.cost, u.response_cost)

Search with streaming

Web search with streaming:

Important

Gemini web search requests are part of prompt_tokens_details, which is only included with stream_options={"include_usage": True} when stream=True.

There is currently a bug with Gemini web search request counts (see the linked Issue and PR); waiting for the litellm 1.80.11 PyPI release.

chat = Chat(flash)  # `flash` resolves to gemini-3-flash-preview (see the logged model below)
res = chat("What is the weather like in NYC? Search web.", search="m", stream=True, stream_options={"include_usage": True})
for o in res: pass
# print(o)
time.sleep(0.1)
u = logger.usage(select=slc)[-1]; u
Usage(id=7, timestamp=UNSET, model='gemini-3-flash-preview', user_id='user-123', prompt_tokens=12, completion_tokens=307, total_tokens=319, cached_tokens=None, cache_creation_tokens=None, cache_read_tokens=None, web_search_requests=1, response_cost=0.035927)
Important

Anthropic web search requests are available in usage.server_tool_use.
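On a non-streamed Anthropic response r, the count can also be read off the usage object directly; the attribute path below is an assumption about how litellm passes that field through:

# r.usage.server_tool_use.web_search_requests  # e.g. 1 for a single search (assumed litellm field names)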

chat = Chat('claude-sonnet-4-5')
res = chat("What is the weather like in NYC now? Search web", search="m", stream=True, stream_options={"include_usage": True})
for o in res: pass
# print(o)
time.sleep(0.1)
u = logger.usage(select=slc)[-1]; u
Usage(id=9, timestamp=UNSET, model='claude-sonnet-4-5', user_id='user-123', prompt_tokens=9121, completion_tokens=216, total_tokens=9337, cached_tokens=0, cache_creation_tokens=0, cache_read_tokens=0, web_search_requests=1, response_cost=0.030603)
test_eq(u.cost, u.response_cost + u.web_search_requests * model_prices[u.model]['web_search_prc'])
test_eq(len(logger.usage()), 8)
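Concretely, the logged response_cost of 0.030603 plus one web search at 10/1e3 gives a true total of 0.040603 for this call, which is what u.cost returns here.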

source

total_cost


def total_cost(
    sc:float=0.01
):

Call self as a function.

L(logger.usage()).attrgot('response_cost').sum()
0.09923900000000001
disable_cachy()

A simple Gemini example (implicit caching needs a minimum prompt size, and the call must be run twice to see cached_tokens):

# #| notest
# chat = Chat('gemini/gemini-2.5-flash')
# chat("What is 2+2?"* 500)
# time.sleep(5)
# chat("What is 2+2?"* 500)
# #| notest
# time.sleep(0.1) # wait for callback db write
# u = logger.usage(select=slc)[-1];u
# #| notest
# test_eq(len(logger.usage()), 10)
# test_eq(logger.usage()[-1].cached_tokens > 3000, True)
tf.close()