@@ -25,7 +25,6 @@ python chat.py
 
 ```python
 import asyncio
-from typing import cast
 
 from copilot import CopilotClient
 from copilot.generated.session_events import AssistantMessageData
@@ -39,8 +38,8 @@ async def main():
     done = asyncio.Event()
 
     def on_event(event):
-        if event.type.value == "assistant.message":
-            print(cast(AssistantMessageData, event.data).content)
+        if event.type.value == "assistant.message" and isinstance(event.data, AssistantMessageData):
+            print(event.data.content)
         elif event.type.value == "session.idle":
             done.set()
 
@@ -59,7 +58,6 @@ If you need more control over the lifecycle, you can call `start()`, `stop()`, a
 
 ```python
 import asyncio
-from typing import cast
 
 from copilot import CopilotClient
 from copilot.generated.session_events import AssistantMessageData
@@ -78,8 +76,8 @@ async def main():
     done = asyncio.Event()
 
     def on_event(event):
-        if event.type.value == "assistant.message":
-            print(cast(AssistantMessageData, event.data).content)
+        if event.type.value == "assistant.message" and isinstance(event.data, AssistantMessageData):
+            print(event.data.content)
         elif event.type.value == "session.idle":
             done.set()
 
@@ -338,7 +336,6 @@ Enable streaming to receive assistant response chunks as they're generated:
 
 ```python
 import asyncio
-from typing import cast
 
 from copilot import CopilotClient
 from copilot.generated.session_events import (
@@ -362,21 +359,25 @@ async def main():
     def on_event(event):
         match event.type.value:
             case "assistant.message_delta":
-                # Streaming message chunk - print incrementally
-                delta = cast(AssistantMessageDeltaData, event.data).delta_content or ""
-                print(delta, end="", flush=True)
+                if isinstance(event.data, AssistantMessageDeltaData):
+                    # Streaming message chunk - print incrementally
+                    delta = event.data.delta_content or ""
+                    print(delta, end="", flush=True)
             case "assistant.reasoning_delta":
-                # Streaming reasoning chunk (if model supports reasoning)
-                delta = cast(AssistantReasoningDeltaData, event.data).delta_content or ""
-                print(delta, end="", flush=True)
+                if isinstance(event.data, AssistantReasoningDeltaData):
+                    # Streaming reasoning chunk (if model supports reasoning)
+                    delta = event.data.delta_content or ""
+                    print(delta, end="", flush=True)
             case "assistant.message":
-                # Final message - complete content
-                print("\n--- Final message ---")
-                print(cast(AssistantMessageData, event.data).content)
+                if isinstance(event.data, AssistantMessageData):
+                    # Final message - complete content
+                    print("\n--- Final message ---")
+                    print(event.data.content)
             case "assistant.reasoning":
-                # Final reasoning content (if model supports reasoning)
-                print("--- Reasoning ---")
-                print(cast(AssistantReasoningData, event.data).content)
+                if isinstance(event.data, AssistantReasoningData):
+                    # Final reasoning content (if model supports reasoning)
+                    print("--- Reasoning ---")
+                    print(event.data.content)
             case "session.idle":
                 # Session finished processing
                 done.set()
0 commit comments