Streaming response
for chunk in stream:
print(f"Streaming chunk: {chunk}")
try:
# For OpenAI response.output_item.added
if hasattr(chunk, 'item') and hasattr(chunk.item, 'content'):
if chunk.item.content and len(chunk.item.content) > 0:
yield chunk.item.content[0].text
# For OpenAI response.content_part.added events
elif hasattr(chunk, 'part') and hasattr(chunk.part, 'text'):
yield chunk.part.text
# For OpenAI delta events (GPT-3.5 style)
elif hasattr(chunk, 'delta') and hasattr(chunk.delta, 'content'):
yield chunk.delta.content
# For OpenAI text completion
elif hasattr(chunk, 'text'):
yield chunk.text
# For OpenAI output_text attribute
elif hasattr(chunk, 'output_text'):
yield chunk.output_text
# For simple string responses
elif isinstance(chunk, str):
yield chunk
# As a fallback, try to get a meaningful string representation
else:
# Extract any text content we can find
if hasattr(chunk, 'content'):
yield chunk.content
elif hasattr(chunk, 'message') and hasattr(chunk.message, 'content'):
yield chunk.message.content
else:
# Last resort: convert to string and yield
# But filter out events that don't contain actual content
chunk_str = str(chunk)
if not any(event_type in chunk_str for event_type in ['created', 'in_progress', 'done']):
yield "Processing response..."
Streaming chunk: ResponseOutputItemDoneEvent(item=ResponseReasoningItem(id='rs_345234523454325', summary=[], type='reasoning', status=None), output_index=0, type='response.output_item.done')
Streaming chunk: ResponseOutputItemAddedEvent(item=ResponseOutputMessage(id='msg_682345352345', content=[], role='assistant', status='in_progress', type='message'), output_index=1, type='response.output_item.added')
Streaming chunk: ResponseContentPartAddedEvent(content_index=0, item_id='msg_682345352345', output_index=1, part=ResponseOutputText(annotations=[], text='', type='output_text'), type='response.content_part.added')
Streaming chunk: ResponseTextDeltaEvent(content_index=0, delta='Certainly', item_id='msg_682345352345', output_index=1, type='response.output_text.delta')
Streaming chunk: ResponseTextDeltaEvent(content_index=0, delta=',', item_id='msg_682345352345', output_index=1, type='response.output_text.delta')
Streaming chunk: ResponseTextDeltaEvent(content_index=0, delta=' Mr', item_id='msg_682345352345', output_index=1, type='response.output_text.delta')
...
Streaming chunk: ResponseTextDeltaEvent(content_index=0, delta=' your', item_id='msg_682345352345', output_index=1, type='response.output_text.delta')
Streaming chunk: ResponseTextDeltaEvent(content_index=0, delta=' time', item_id='msg_682345352345', output_index=1, type='response.output_text.delta')
Streaming chunk: ResponseTextDeltaEvent(content_index=0, delta=' exploring', item_id='msg_682345352345', output_index=1, type='response.output_text.delta')
Streaming chunk: ResponseTextDeltaEvent(content_index=0, delta='!', item_id='msg_682345352345', output_index=1, type='response.output_text.delta')
Streaming chunk: ResponseTextDoneEvent(content_index=0, item_id='msg_682345352345', output_index=1, text='response_text', type='response.output_text.done')
Streaming chunk: ResponseContentPartDoneEvent(content_index=0, item_id='msg_682345352345', output_index=1, part=ResponseOutputText(annotations=[], text='response_text', type='output_text'), type='response.content_part.done')
Streaming chunk: ResponseOutputItemDoneEvent(item=ResponseOutputMessage(id='msg_682345352345', content=[ResponseOutputText(annotations=[], text='response_text')], role='assistant', status='completed', type='message'), output_index=1, type='response.output_item.done')
Streaming chunk: ResponseCompletedEvent(response=Response(id='resp_681efacab408dsfaasdf', created_at=1746860746.0, error=None, incomplete_details=None, instructions='input_text', metadata={}, model='o4-mini-2025-04-16', object='response', output=[ResponseReasoningItem(id='rs_681efacsadfasdfasd', summary=[], type='reasoning', status=None), ResponseOutputMessage(id='msg_682345352345', content=[ResponseOutputText(annotations=[], text='response_text', type='output_text')], role='assistant', status='completed', type='message')], parallel_tool_calls=True, temperature=1.0, tool_choice='auto', tools=[], top_p=1.0, max_output_tokens=None, previous_response_id=None, reasoning=Reasoning(effort='medium', generate_summary=None, summary=None), service_tier='default', status='completed', text=ResponseTextConfig(format=ResponseFormatText(type='text')), truncation='disabled', usage=ResponseUsage(input_tokens=130, input_tokens_details=InputTokensDetails(cached_tokens=0), output_tokens=819, output_tokens_details=OutputTokensDetails(reasoning_tokens=576), total_tokens=949), user=None, store=True), type='response.completed')