mirror of
https://github.com/openai/codex.git
synced 2026-04-30 11:21:34 +03:00
Remove CODEX_PYTHON_RUNTIME_VERSION from the repo bootstrap path and always provision the checked-in pinned codex-cli-bin runtime version for examples, notebook, and real integration coverage. This keeps repo-source Python execution aligned with one binary contract, rewires the real integration harness and notebook bootstrap to use the pinned runtime directly, and updates the docs to describe automatic pinned-runtime provisioning instead of env-driven overrides. Validation: - RUN_REAL_CODEX_TESTS=1 python3 -m pytest sdk/python/tests -rs - RUN_REAL_CODEX_TESTS=1 python3 -m pytest sdk/python/tests/test_real_app_server_integration.py -rs Co-authored-by: Codex <noreply@openai.com>
549 lines
20 KiB
Plaintext
549 lines
20 KiB
Plaintext
{
|
|
"cells": [
|
|
{
|
|
"cell_type": "markdown",
|
|
"metadata": {},
|
|
"source": [
|
|
"# Codex Python SDK Walkthrough\n",
|
|
"\n",
|
|
"Public SDK surface only (`codex_app_server` root exports)."
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"# Cell 1: bootstrap local SDK imports + pinned runtime package\n",
|
|
"import os\n",
|
|
"import sys\n",
|
|
"from pathlib import Path\n",
|
|
"\n",
|
|
"if sys.version_info < (3, 10):\n",
|
|
" raise RuntimeError(\n",
|
|
" f'Notebook requires Python 3.10+; current interpreter is {sys.version.split()[0]}.'\n",
|
|
" )\n",
|
|
"\n",
|
|
"try:\n",
|
|
" _ = os.getcwd()\n",
|
|
"except FileNotFoundError:\n",
|
|
" os.chdir(str(Path.home()))\n",
|
|
"\n",
|
|
"\n",
|
|
"def _is_sdk_python_dir(path: Path) -> bool:\n",
|
|
" return (path / 'pyproject.toml').exists() and (path / 'src' / 'codex_app_server').exists()\n",
|
|
"\n",
|
|
"\n",
|
|
"def _iter_home_fallback_candidates(home: Path):\n",
|
|
" # bounded depth scan under home to support launching notebooks from unrelated cwd values\n",
|
|
" patterns = ('sdk/python', '*/sdk/python', '*/*/sdk/python', '*/*/*/sdk/python')\n",
|
|
" for pattern in patterns:\n",
|
|
" yield from home.glob(pattern)\n",
|
|
"\n",
|
|
"\n",
|
|
"def _find_sdk_python_dir(start: Path) -> Path | None:\n",
|
|
" checked = set()\n",
|
|
"\n",
|
|
" def _consider(candidate: Path) -> Path | None:\n",
|
|
" resolved = candidate.resolve()\n",
|
|
" if resolved in checked:\n",
|
|
" return None\n",
|
|
" checked.add(resolved)\n",
|
|
" if _is_sdk_python_dir(resolved):\n",
|
|
" return resolved\n",
|
|
" return None\n",
|
|
"\n",
|
|
" for candidate in [start, *start.parents]:\n",
|
|
" found = _consider(candidate)\n",
|
|
" if found is not None:\n",
|
|
" return found\n",
|
|
"\n",
|
|
" for candidate in [start / 'sdk' / 'python', *(parent / 'sdk' / 'python' for parent in start.parents)]:\n",
|
|
" found = _consider(candidate)\n",
|
|
" if found is not None:\n",
|
|
" return found\n",
|
|
"\n",
|
|
" env_dir = os.environ.get('CODEX_PYTHON_SDK_DIR')\n",
|
|
" if env_dir:\n",
|
|
" found = _consider(Path(env_dir).expanduser())\n",
|
|
" if found is not None:\n",
|
|
" return found\n",
|
|
"\n",
|
|
" for entry in sys.path:\n",
|
|
" if not entry:\n",
|
|
" continue\n",
|
|
" entry_path = Path(entry).expanduser()\n",
|
|
" for candidate in (entry_path, entry_path / 'sdk' / 'python'):\n",
|
|
" found = _consider(candidate)\n",
|
|
" if found is not None:\n",
|
|
" return found\n",
|
|
"\n",
|
|
" home = Path.home()\n",
|
|
" for candidate in _iter_home_fallback_candidates(home):\n",
|
|
" found = _consider(candidate)\n",
|
|
" if found is not None:\n",
|
|
" return found\n",
|
|
"\n",
|
|
" return None\n",
|
|
"\n",
|
|
"\n",
|
|
"repo_python_dir = _find_sdk_python_dir(Path.cwd())\n",
|
|
"if repo_python_dir is None:\n",
|
|
" raise RuntimeError('Could not locate sdk/python. Set CODEX_PYTHON_SDK_DIR to your sdk/python path.')\n",
|
|
"\n",
|
|
"repo_python_str = str(repo_python_dir)\n",
|
|
"if repo_python_str not in sys.path:\n",
|
|
" sys.path.insert(0, repo_python_str)\n",
|
|
"\n",
|
|
"from _runtime_setup import ensure_runtime_package_installed\n",
|
|
"\n",
|
|
"runtime_version = ensure_runtime_package_installed(\n",
|
|
" sys.executable,\n",
|
|
" repo_python_dir,\n",
|
|
")\n",
|
|
"\n",
|
|
"src_dir = repo_python_dir / 'src'\n",
|
|
"src_str = str(src_dir)\n",
|
|
"if src_str not in sys.path:\n",
|
|
" sys.path.insert(0, src_str)\n",
|
|
"\n",
|
|
"# Force fresh imports after SDK upgrades in the same notebook kernel.\n",
|
|
"for module_name in list(sys.modules):\n",
|
|
" if module_name == 'codex_app_server' or module_name.startswith('codex_app_server.'):\n",
|
|
" sys.modules.pop(module_name, None)\n",
|
|
"\n",
|
|
"print('Kernel:', sys.executable)\n",
|
|
"print('SDK source:', src_dir)\n",
|
|
"print('Runtime package:', runtime_version)\n",
|
|
"\n"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"# Cell 2: imports (public only)\n",
|
|
"from codex_app_server import (\n",
|
|
" AsyncCodex,\n",
|
|
" Codex,\n",
|
|
" ImageInput,\n",
|
|
" LocalImageInput,\n",
|
|
" TextInput,\n",
|
|
" retry_on_overload,\n",
|
|
")"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"# Cell 3: simple sync conversation\n",
|
|
"with Codex() as codex:\n",
|
|
" thread = codex.thread_start(model='gpt-5', config={'model_reasoning_effort': 'high'})\n",
|
|
" turn = thread.turn(TextInput('Explain gradient descent in 3 bullets.'))\n",
|
|
" result = turn.run()\n",
|
|
"\n",
|
|
" print('server:', codex.metadata)\n",
|
|
" print('status:', result.status)\n",
|
|
" print(result.text)"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"# Cell 4: multi-turn continuity in same thread\n",
|
|
"with Codex() as codex:\n",
|
|
" thread = codex.thread_start(model='gpt-5', config={'model_reasoning_effort': 'high'})\n",
|
|
"\n",
|
|
" first = thread.turn(TextInput('Give a short summary of transformers.')).run()\n",
|
|
" second = thread.turn(TextInput('Now explain that to a high-school student.')).run()\n",
|
|
"\n",
|
|
" print('first status:', first.status)\n",
|
|
" print('second status:', second.status)\n",
|
|
" print('second text:', second.text)"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"# Cell 5: full thread lifecycle and branching (sync)\n",
|
|
"with Codex() as codex:\n",
|
|
" thread = codex.thread_start(model='gpt-5', config={'model_reasoning_effort': 'high'})\n",
|
|
" first = thread.turn(TextInput('One sentence about structured planning.')).run()\n",
|
|
" second = thread.turn(TextInput('Now restate it for a junior engineer.')).run()\n",
|
|
"\n",
|
|
" reopened = codex.thread_resume(thread.id)\n",
|
|
" listing_active = codex.thread_list(limit=20, archived=False)\n",
|
|
" reading = reopened.read(include_turns=True)\n",
|
|
"\n",
|
|
" _ = reopened.set_name('sdk-lifecycle-demo')\n",
|
|
" _ = codex.thread_archive(reopened.id)\n",
|
|
" listing_archived = codex.thread_list(limit=20, archived=True)\n",
|
|
" unarchived = codex.thread_unarchive(reopened.id)\n",
|
|
"\n",
|
|
" resumed_info = 'n/a'\n",
|
|
" try:\n",
|
|
" resumed = codex.thread_resume(\n",
|
|
" unarchived.id,\n",
|
|
" model='gpt-5',\n",
|
|
" config={'model_reasoning_effort': 'high'},\n",
|
|
" )\n",
|
|
" resumed_result = resumed.turn(TextInput('Continue in one short sentence.')).run()\n",
|
|
" resumed_info = f'{resumed_result.turn_id} {resumed_result.status}'\n",
|
|
" except Exception as e:\n",
|
|
" resumed_info = f'skipped({type(e).__name__})'\n",
|
|
"\n",
|
|
" forked_info = 'n/a'\n",
|
|
" try:\n",
|
|
" forked = codex.thread_fork(unarchived.id, model='gpt-5')\n",
|
|
" forked_result = forked.turn(TextInput('Take a different angle in one short sentence.')).run()\n",
|
|
" forked_info = f'{forked_result.turn_id} {forked_result.status}'\n",
|
|
" except Exception as e:\n",
|
|
" forked_info = f'skipped({type(e).__name__})'\n",
|
|
"\n",
|
|
" compact_info = 'sent'\n",
|
|
" try:\n",
|
|
" _ = unarchived.compact()\n",
|
|
" except Exception as e:\n",
|
|
" compact_info = f'skipped({type(e).__name__})'\n",
|
|
"\n",
|
|
" print('Lifecycle OK:', thread.id)\n",
|
|
" print('first:', first.turn_id, first.status)\n",
|
|
" print('second:', second.turn_id, second.status)\n",
|
|
" print('read.turns:', len(reading.thread.turns or []))\n",
|
|
" print('list.active:', len(listing_active.data))\n",
|
|
" print('list.archived:', len(listing_archived.data))\n",
|
|
" print('resumed:', resumed_info)\n",
|
|
" print('forked:', forked_info)\n",
|
|
" print('compact:', compact_info)\n",
|
|
"\n"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"# Cell 5b: one turn with most optional turn params\n",
|
|
"from pathlib import Path\n",
|
|
"from codex_app_server import (\n",
|
|
" AskForApproval,\n",
|
|
" Personality,\n",
|
|
" ReasoningEffort,\n",
|
|
" ReasoningSummary,\n",
|
|
" SandboxPolicy,\n",
|
|
")\n",
|
|
"\n",
|
|
"output_schema = {\n",
|
|
" 'type': 'object',\n",
|
|
" 'properties': {\n",
|
|
" 'summary': {'type': 'string'},\n",
|
|
" 'actions': {'type': 'array', 'items': {'type': 'string'}},\n",
|
|
" },\n",
|
|
" 'required': ['summary', 'actions'],\n",
|
|
" 'additionalProperties': False,\n",
|
|
"}\n",
|
|
"\n",
|
|
"sandbox_policy = SandboxPolicy.model_validate({'type': 'readOnly', 'access': {'type': 'fullAccess'}})\n",
|
|
"summary = ReasoningSummary.model_validate('concise')\n",
|
|
"\n",
|
|
"with Codex() as codex:\n",
|
|
" thread = codex.thread_start(model='gpt-5', config={'model_reasoning_effort': 'high'})\n",
|
|
" turn = thread.turn(\n",
|
|
" TextInput('Propose a safe production feature-flag rollout. Return JSON matching the schema.'),\n",
|
|
" approval_policy=AskForApproval.never,\n",
|
|
" cwd=str(Path.cwd()),\n",
|
|
" effort=ReasoningEffort.medium,\n",
|
|
" model='gpt-5',\n",
|
|
" output_schema=output_schema,\n",
|
|
" personality=Personality.pragmatic,\n",
|
|
" sandbox_policy=sandbox_policy,\n",
|
|
" summary=summary,\n",
|
|
" )\n",
|
|
" result = turn.run()\n",
|
|
"\n",
|
|
" print('status:', result.status)\n",
|
|
" print(result.text)\n"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"# Cell 5c: choose highest model + highest supported reasoning, then run turns\n",
|
|
"from pathlib import Path\n",
|
|
"from codex_app_server import (\n",
|
|
" AskForApproval,\n",
|
|
" Personality,\n",
|
|
" ReasoningEffort,\n",
|
|
" ReasoningSummary,\n",
|
|
" SandboxPolicy,\n",
|
|
")\n",
|
|
"\n",
|
|
"reasoning_rank = {\n",
|
|
" 'none': 0,\n",
|
|
" 'minimal': 1,\n",
|
|
" 'low': 2,\n",
|
|
" 'medium': 3,\n",
|
|
" 'high': 4,\n",
|
|
" 'xhigh': 5,\n",
|
|
"}\n",
|
|
"\n",
|
|
"\n",
|
|
"def pick_highest_model(models):\n",
|
|
" visible = [m for m in models if not m.hidden] or models\n",
|
|
" known_names = {m.id for m in visible} | {m.model for m in visible}\n",
|
|
" top_candidates = [m for m in visible if not (m.upgrade and m.upgrade in known_names)]\n",
|
|
" pool = top_candidates or visible\n",
|
|
" return max(pool, key=lambda m: (m.model, m.id))\n",
|
|
"\n",
|
|
"\n",
|
|
"def pick_highest_turn_effort(model) -> ReasoningEffort:\n",
|
|
" if not model.supported_reasoning_efforts:\n",
|
|
" return ReasoningEffort.medium\n",
|
|
" best = max(model.supported_reasoning_efforts, key=lambda opt: reasoning_rank.get(opt.reasoning_effort.value, -1))\n",
|
|
" return ReasoningEffort(best.reasoning_effort.value)\n",
|
|
"\n",
|
|
"\n",
|
|
"output_schema = {\n",
|
|
" 'type': 'object',\n",
|
|
" 'properties': {\n",
|
|
" 'summary': {'type': 'string'},\n",
|
|
" 'actions': {'type': 'array', 'items': {'type': 'string'}},\n",
|
|
" },\n",
|
|
" 'required': ['summary', 'actions'],\n",
|
|
" 'additionalProperties': False,\n",
|
|
"}\n",
|
|
"sandbox_policy = SandboxPolicy.model_validate({'type': 'readOnly', 'access': {'type': 'fullAccess'}})\n",
|
|
"\n",
|
|
"with Codex() as codex:\n",
|
|
" models = codex.models(include_hidden=True)\n",
|
|
" selected_model = pick_highest_model(models.data)\n",
|
|
" selected_effort = pick_highest_turn_effort(selected_model)\n",
|
|
"\n",
|
|
" print('selected.model:', selected_model.model)\n",
|
|
" print('selected.effort:', selected_effort.value)\n",
|
|
"\n",
|
|
" thread = codex.thread_start(model=selected_model.model, config={'model_reasoning_effort': selected_effort.value})\n",
|
|
"\n",
|
|
" first = thread.turn(\n",
|
|
" TextInput('Give one short sentence about reliable production releases.'),\n",
|
|
" model=selected_model.model,\n",
|
|
" effort=selected_effort,\n",
|
|
" ).run()\n",
|
|
" print('agent.message:', first.text)\n",
|
|
" print('usage:', first.usage)\n",
|
|
"\n",
|
|
" second = thread.turn(\n",
|
|
" TextInput('Return JSON for a safe feature-flag rollout plan.'),\n",
|
|
" approval_policy=AskForApproval.never,\n",
|
|
" cwd=str(Path.cwd()),\n",
|
|
" effort=selected_effort,\n",
|
|
" model=selected_model.model,\n",
|
|
" output_schema=output_schema,\n",
|
|
" personality=Personality.pragmatic,\n",
|
|
" sandbox_policy=sandbox_policy,\n",
|
|
" summary=ReasoningSummary.model_validate('concise'),\n",
|
|
" ).run()\n",
|
|
" print('agent.message.params:', second.text)\n",
|
|
" print('usage.params:', second.usage)\n"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"# Cell 6: multimodal with remote image\n",
|
|
"remote_image_url = 'https://raw.githubusercontent.com/github/explore/main/topics/python/python.png'\n",
|
|
"\n",
|
|
"with Codex() as codex:\n",
|
|
" thread = codex.thread_start(model='gpt-5', config={'model_reasoning_effort': 'high'})\n",
|
|
" result = thread.turn([\n",
|
|
" TextInput('What do you see in this image? 3 bullets.'),\n",
|
|
" ImageInput(remote_image_url),\n",
|
|
" ]).run()\n",
|
|
"\n",
|
|
" print('status:', result.status)\n",
|
|
" print(result.text)\n"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"# Cell 7: multimodal with local image (bundled asset)\n",
|
|
"local_image_path = repo_python_dir / 'examples' / 'assets' / 'sample_scene.png'\n",
|
|
"if not local_image_path.exists():\n",
|
|
" raise FileNotFoundError(f'Missing bundled image: {local_image_path}')\n",
|
|
"\n",
|
|
"with Codex() as codex:\n",
|
|
" thread = codex.thread_start(model='gpt-5', config={'model_reasoning_effort': 'high'})\n",
|
|
" result = thread.turn([\n",
|
|
" TextInput('Describe this local image in 2 bullets.'),\n",
|
|
" LocalImageInput(str(local_image_path.resolve())),\n",
|
|
" ]).run()\n",
|
|
"\n",
|
|
" print('status:', result.status)\n",
|
|
" print(result.text)\n"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"# Cell 8: retry-on-overload pattern\n",
|
|
"with Codex() as codex:\n",
|
|
" thread = codex.thread_start(model='gpt-5', config={'model_reasoning_effort': 'high'})\n",
|
|
"\n",
|
|
" result = retry_on_overload(\n",
|
|
" lambda: thread.turn(TextInput('List 5 failure modes in distributed systems.')).run(),\n",
|
|
" max_attempts=3,\n",
|
|
" initial_delay_s=0.25,\n",
|
|
" max_delay_s=2.0,\n",
|
|
" )\n",
|
|
"\n",
|
|
" print('status:', result.status)\n",
|
|
" print(result.text)"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"# Cell 9: full thread lifecycle and branching (async)\n",
|
|
"import asyncio\n",
|
|
"\n",
|
|
"\n",
|
|
"async def async_lifecycle_demo():\n",
|
|
" async with AsyncCodex() as codex:\n",
|
|
" thread = await codex.thread_start(model='gpt-5', config={'model_reasoning_effort': 'high'})\n",
|
|
" first = await (await thread.turn(TextInput('One sentence about structured planning.'))).run()\n",
|
|
" second = await (await thread.turn(TextInput('Now restate it for a junior engineer.'))).run()\n",
|
|
"\n",
|
|
" reopened = await codex.thread_resume(thread.id)\n",
|
|
" listing_active = await codex.thread_list(limit=20, archived=False)\n",
|
|
" reading = await reopened.read(include_turns=True)\n",
|
|
"\n",
|
|
" _ = await reopened.set_name('sdk-lifecycle-demo')\n",
|
|
" _ = await codex.thread_archive(reopened.id)\n",
|
|
" listing_archived = await codex.thread_list(limit=20, archived=True)\n",
|
|
" unarchived = await codex.thread_unarchive(reopened.id)\n",
|
|
"\n",
|
|
" resumed_info = 'n/a'\n",
|
|
" try:\n",
|
|
" resumed = await codex.thread_resume(\n",
|
|
" unarchived.id,\n",
|
|
" model='gpt-5',\n",
|
|
" config={'model_reasoning_effort': 'high'},\n",
|
|
" )\n",
|
|
" resumed_result = await (await resumed.turn(TextInput('Continue in one short sentence.'))).run()\n",
|
|
" resumed_info = f'{resumed_result.turn_id} {resumed_result.status}'\n",
|
|
" except Exception as e:\n",
|
|
" resumed_info = f'skipped({type(e).__name__})'\n",
|
|
"\n",
|
|
" forked_info = 'n/a'\n",
|
|
" try:\n",
|
|
" forked = await codex.thread_fork(unarchived.id, model='gpt-5')\n",
|
|
" forked_result = await (await forked.turn(TextInput('Take a different angle in one short sentence.'))).run()\n",
|
|
" forked_info = f'{forked_result.turn_id} {forked_result.status}'\n",
|
|
" except Exception as e:\n",
|
|
" forked_info = f'skipped({type(e).__name__})'\n",
|
|
"\n",
|
|
" compact_info = 'sent'\n",
|
|
" try:\n",
|
|
" _ = await unarchived.compact()\n",
|
|
" except Exception as e:\n",
|
|
" compact_info = f'skipped({type(e).__name__})'\n",
|
|
"\n",
|
|
" print('Lifecycle OK:', thread.id)\n",
|
|
" print('first:', first.turn_id, first.status)\n",
|
|
" print('second:', second.turn_id, second.status)\n",
|
|
" print('read.turns:', len(reading.thread.turns or []))\n",
|
|
" print('list.active:', len(listing_active.data))\n",
|
|
" print('list.archived:', len(listing_archived.data))\n",
|
|
" print('resumed:', resumed_info)\n",
|
|
" print('forked:', forked_info)\n",
|
|
" print('compact:', compact_info)\n",
|
|
"\n",
|
|
"\n",
|
|
"await async_lifecycle_demo()\n",
|
|
"\n"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"# Cell 10: async stream + steer + interrupt (best effort)\n",
|
|
"import asyncio\n",
|
|
"\n",
|
|
"\n",
|
|
"async def async_stream_demo():\n",
|
|
" async with AsyncCodex() as codex:\n",
|
|
" thread = await codex.thread_start(model='gpt-5', config={'model_reasoning_effort': 'high'})\n",
|
|
" turn = await thread.turn(TextInput('Count from 1 to 200 with commas, then one summary sentence.'))\n",
|
|
"\n",
|
|
" try:\n",
|
|
" _ = await turn.steer(TextInput('Keep it brief and stop after 20 numbers.'))\n",
|
|
" print('steer: sent')\n",
|
|
" except Exception as e:\n",
|
|
" print('steer: skipped', type(e).__name__)\n",
|
|
"\n",
|
|
" try:\n",
|
|
" _ = await turn.interrupt()\n",
|
|
" print('interrupt: sent')\n",
|
|
" except Exception as e:\n",
|
|
" print('interrupt: skipped', type(e).__name__)\n",
|
|
"\n",
|
|
" event_count = 0\n",
|
|
" async for event in turn.stream():\n",
|
|
" event_count += 1\n",
|
|
" print(event.method, event.payload)\n",
|
|
"\n",
|
|
" print('events.count:', event_count)\n",
|
|
"\n",
|
|
"\n",
|
|
"await async_stream_demo()\n",
|
|
"\n"
|
|
]
|
|
}
|
|
],
|
|
"metadata": {
|
|
"kernelspec": {
|
|
"display_name": "Python 3",
|
|
"language": "python",
|
|
"name": "python3"
|
|
},
|
|
"language_info": {
|
|
"name": "python",
|
|
"version": "3.10+"
|
|
}
|
|
},
|
|
"nbformat": 4,
|
|
"nbformat_minor": 5
|
|
}
|