Doc Improvements #1585

Merged · 9 commits · Sep 26, 2024
40 changes: 24 additions & 16 deletions assets/code/open-ai-integration/rtc-py.mdx
@@ -48,16 +48,14 @@ class ChannelEventObserver(IRTCConnectionObserver, IRTCLocalUserObserver, IAudio
self.emitter = event_emitter
self.audio_stream = AudioStream()



def emit_event(self, event_name: str, *args):
"""Helper function to emit events."""
self.emitter.emit(event_name, *args)

def on_connected(
self, agora_rtc_conn: RTCConnection, conn_info: RTCConnInfo, reason
):
logger.info(f"Connected to RTC: {agora_rtc_conn} {conn_info} {reason}")
self.emit_event("connection_state_changed", agora_rtc_conn, conn_info, reason)

def on_disconnected(
@@ -132,13 +130,15 @@ class ChannelEventObserver(IRTCConnectionObserver, IRTCLocalUserObserver, IAudio
def on_playback_audio_frame_before_mixing(
self, agora_local_user: LocalUser, channelId, uid, frame: AudioFrame
):
# Convert the received audio frame to PcmAudioFrame
audio_frame = PcmAudioFrame()
audio_frame.samples_per_channel = frame.samples_per_channel
audio_frame.bytes_per_sample = frame.bytes_per_sample
audio_frame.number_of_channels = frame.channels
audio_frame.sample_rate = SAMPLE_RATE
audio_frame.data = frame.buffer

# Add the audio frame to the queue
self.loop.call_soon_threadsafe(self.audio_stream.queue.put_nowait, audio_frame)
return 0

@@ -155,16 +155,20 @@ class Channel():
self.chat = Chat(self)
self.channelId = channelId
self.uid = uid

# Configure RTC connection
conn_config = RTCConnConfig(
client_role_type=ClientRoleType.CLIENT_ROLE_BROADCASTER,
channel_profile=ChannelProfileType.CHANNEL_PROFILE_LIVE_BROADCASTING,
)
self.connection = self.rtc.agora_service.create_rtc_connection(conn_config)

# Set up channel event observer
self.channel_event_observer = ChannelEventObserver(self.emitter)
self.connection.register_observer(self.channel_event_observer)
self.connection.connect("", self.channelId, self.uid)

# Configure local user
self.local_user = self.connection.get_local_user()
self.local_user.set_playback_audio_frame_before_mixing_parameters(
CHANNELS, SAMPLE_RATE
@@ -173,6 +177,7 @@ class Channel():
self.local_user.register_audio_frame_observer(self.channel_event_observer)
self.local_user.subscribe_all_audio()

# Set up audio track for publishing
self.media_node_factory = self.rtc.agora_service.create_media_node_factory()
self.audio_pcm_data_sender = (
self.media_node_factory.create_audio_pcm_data_sender()
@@ -183,6 +188,7 @@ class Channel():
self.audio_track.set_enabled(1)
self.local_user.publish_audio(self.audio_track)

# Create data stream for messaging
self.stream_id = self.connection.create_data_stream(False, False)
self.received_chunks = {}
self.waiting_message = None
@@ -214,10 +220,11 @@ class Channel():
async def push_audio_frame(self, frame: bytes) -> None:
"""
Pushes an audio frame to the channel.

Parameters:
frame: The audio frame to push.
"""
# Create a PcmAudioFrame from the input bytes
audio_frame = PcmAudioFrame()
audio_frame.data = bytearray(frame)
audio_frame.timestamp = 0
@@ -228,6 +235,7 @@ class Channel():
len(frame) / audio_frame.bytes_per_sample / audio_frame.number_of_channels
)

# Send the audio frame
self.audio_pcm_data_sender.send_audio_pcm_data(audio_frame)

async def subscribe_audio(self, uid: int) -> None:
@@ -299,15 +307,15 @@ class Channel():
def _split_string_into_chunks(self, long_string, msg_id, chunk_size=300) -> list[str]:
"""
Splits a long string into chunks of a given size.

Parameters:
long_string: The string to split.
msg_id: The message ID.
chunk_size: The size of each chunk.

Returns:
list[str]: The list of JSON-encoded chunks.

"""
total_parts = (len(long_string) + chunk_size - 1) // chunk_size
json_chunks = []
@@ -321,10 +329,10 @@ class Channel():
'content': long_string[start:end]
}
json_chunk = json.dumps(chunk, ensure_ascii=False)
json_chunks.append(json_chunk)
return json_chunks

async def send_stream_message(self, data: str, msg_id: str) -> None:
"""
Sends a stream message to the channel.

@@ -333,14 +341,14 @@ class Channel():
msg_id: The message ID.
"""

chunks = self._split_string_into_chunks(data, msg_id)
for chunk in chunks:
self.connection.send_stream_message(self.stream_id, chunk)

def on(self, event_name: str, callback):
"""
Allows external components to subscribe to events.

Parameters:
event_name: The name of the event to subscribe to.
callback: The callback to call when the event is emitted.
@@ -351,7 +359,7 @@ class Channel():
def once(self, event_name: str, callback):
"""
Allows external components to subscribe to events once.

Parameters:
event_name: The name of the event to subscribe to.
callback: The callback to call when the event is emitted.
@@ -386,6 +394,7 @@ class Chat():
"unhandled exception",
exc_info=t.exception(),
)
# Start processing messages
asyncio.create_task(self._process_message()).add_done_callback(log_exception)

async def send_message(self, item: ChatMessage) -> None:
@@ -402,7 +411,6 @@ class Chat():
"""
Processes messages in the queue.
"""

while True:
item: ChatMessage = await self.queue.get()
await self.channel.send_stream_message(item.message, item.msg_id)
@@ -436,13 +444,12 @@ class RtcEngine:
Returns:
Channel: The channel.
"""

loop = asyncio.get_event_loop()
future = loop.create_future()

def callback(agora_rtc_conn: RTCConnection, conn_info: RTCConnInfo, reason):
channel.off("connection_state_changed", callback)
if conn_info.state == 3:  # 3 indicates a successful connection
future.set_result(channel)
else:
future.set_exception(
@@ -460,4 +467,5 @@ class RtcEngine:
Destroys the RTC engine.
"""
self.agora_service.release()`}
</CodeBlock>
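
Taken together, `RtcEngine`, `Channel`, and `Chat` wrap the Agora Python SDK in a small async API. The following is a minimal usage sketch; the import path, the `RtcEngine(...)` constructor arguments, and the `engine.connect(...)` helper are assumptions inferred from the fragments visible in this diff, not confirmed signatures.

```python
# Hedged usage sketch for the wrapper classes above; names marked "assumed"
# are illustrative and must be checked against the actual module.
import asyncio

from rtc import RtcEngine  # assumed import path

async def main():
    engine = RtcEngine(appid="<app-id>", appcert="<app-certificate>")  # assumed ctor
    # Assumed helper that resolves its future once connection_state_changed
    # reports state 3 (successful connection), as in the callback above.
    channel = await engine.connect(channelId="demo", uid=123)

    # Text payloads are split into JSON chunks of up to 300 characters
    # by _split_string_into_chunks before hitting the data stream.
    await channel.send_stream_message("Hello from the agent", msg_id="msg-1")

    # Echo remote audio: frames queued by the observer are pushed back out.
    while True:
        frame = await channel.channel_event_observer.audio_stream.queue.get()
        await channel.push_audio_frame(frame.data)

asyncio.run(main())
```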
5 changes: 2 additions & 3 deletions open-ai-integration/overview/core-concepts.mdx
@@ -7,9 +7,8 @@ description: >
Ideas that are central to developing with Agora.
---

import CoreConcepts from '@docs/shared/common/core-concepts/real-time-stt.mdx';
import CoreConcepts from '@docs/shared/common/core-concepts/open-ai-intro.mdx';

export const toc = [{}];

<CoreConcepts PRODUCT="Real-Time STT" company="Agora" PATH="real-time-stt" client="app" />

<CoreConcepts PRODUCT="Real-Time STT" company="Agora" PATH="open-ai-intro" client="app" />
81 changes: 43 additions & 38 deletions open-ai-integration/overview/product-overview.mdx
@@ -7,44 +7,49 @@ description: >
---

<ProductOverview
title="OpenAI integration"
img="/images/real-time-stt/real-time-stt.png"
quickStartLink="/open-ai-integration/get-started/quickstart"
productFeatures={[
{
title: "Real-Time audio streaming",
content: "Integrate real-time audio streaming between Agora's communication platform and OpenAI's language models to facilitate seamless voice interactions.",
link: ""
},
{
title: "Asynchronous processing",
content: "Handle audio input and model messages concurrently, ensuring responsive interactions without blocking audio streaming.",
link: ""
},
{
title: "Audio frame management",
content: "Manage audio frames effectively by capturing audio data from the Agora channel, sending it to OpenAI for processing, and routing synthesized audio back to users.",
link: ""
},
{
title: "Comprehensive message handling",
content: "Process various message types, including audio transcription deltas and completion notifications, to ensure users receive timely updates and responses.",
link: ""
},
{
title: "Dynamic session configuration",
content: "Dynamically configure session parameters, such as system messages and audio formats, to customize behavior based on application requirements.",
link: ""
},
{
title: "Flexible tool registration",
content: "Enable the registration of both local functions and pass-through tools, allowing the AI to perform specific tasks and retrieve external data.",
link: ""
},
]}

title="OpenAI Integration"
img="/images/real-time-stt/real-time-stt.png"
quickStartLink="/open-ai-integration/get-started/quickstart"
productFeatures={[
{
title: 'Real-Time Audio Streaming',
content:
"Integrate real-time audio streaming between Agora's communication platform and OpenAI's language models to facilitate seamless voice interactions.",
link: '',
},
{
title: 'Asynchronous Processing',
content: 'Handle audio input and model messages concurrently, ensuring responsive interactions without blocking audio streaming.',
link: '',
},
{
title: 'Audio Frame Management',
content:
'Manage audio frames effectively by capturing audio data from the Agora channel, sending it to OpenAI for processing, and routing synthesized audio back to users.',
link: '',
},
{
title: 'Comprehensive Message Handling',
content:
'Process various message types, including audio transcription deltas and completion notifications, to ensure users receive timely updates and responses.',
link: '',
},
{
title: 'Dynamic Session Configuration',
content:
'Dynamically configure session parameters, such as system messages and audio formats, to customize behavior based on application requirements.',
link: '',
},
{
title: 'Flexible Tool Registration',
content:
'Enable the registration of both local functions and pass-through tools, allowing the AI to perform specific tasks and retrieve external data.',
link: '',
},
]}
>

Integrating Agora's real-time audio communication with OpenAI's Large Language Models (LLM) opens the door to powerful, interactive voice-based applications. Create seamless voice-enabled experiences, such as voice-controlled AI assistants, or interactive dialogue systems by combining Agora's robust real-time audio streaming capabilities with the conversational intelligence of OpenAI's LLMs. This integration allows for dynamic, responsive audio interactions, enhancing user engagement across a wide range of use cases—from customer support bots to collaborative voice-driven applications.
Integrating Agora's real-time audio communication with OpenAI's Large Language Models (LLMs) unlocks the potential for powerful, interactive voice-based applications. By combining Agora's robust real-time audio streaming capabilities with the conversational intelligence of OpenAI's LLMs, you can create seamless voice-enabled experiences, such as voice-powered AI assistants or interactive dialogue systems. This integration enables dynamic, responsive audio interactions, enhancing user engagement across a broad range of use cases—from customer support bots to collaborative voice-driven applications.

Most importantly, by combining the strengths of Agora and OpenAI, this integration enables the most natural form of language interaction, lowering the barrier for users to harness the power of AI and making advanced technologies more accessible than ever before.
</ProductOverview>
35 changes: 19 additions & 16 deletions shared/common/core-concepts/agora-console.mdx
@@ -1,25 +1,28 @@
<ProductWrapper notAllowed={["interactive-whiteboard","cloud-recording","agora-analytics","extensions-marketplace"]}>
To use <Vg k="COMPANY" /> SDKs, create an audio and video project in the <Vg k="CONSOLE" /> first. See [Agora account management](../get-started/manage-agora-account) for details.
<ProductWrapper notAllowed={['interactive-whiteboard', 'cloud-recording', 'agora-analytics', 'extensions-marketplace']}>
<Link to="{{Global.AGORA_CONSOLE_URL}}"><Vg k="CONSOLE" /></Link> is the main dashboard where you manage your <Vg k="COMPANY" /> projects and services. Before you can use <Vg k="COMPANY" />'s SDKs, you must first create a project in the <Vg k="CONSOLE" />. See [Agora account management](../get-started/manage-agora-account) for
details.
</ProductWrapper>

<ProductWrapper product={["interactive-whiteboard","cloud-recording","agora-analytics","extensions-marketplace"]}>
To use <Vg k="COMPANY" /> <Vpd k="NAME" />, create a project in the <Vg k="CONSOLE" /> first. See [Agora account management](../reference/manage-agora-account) for details.
<ProductWrapper product={['interactive-whiteboard', 'cloud-recording', 'agora-analytics', 'extensions-marketplace']}>
To use <Vg k="COMPANY" /> <Vpd k="NAME" />, create a project in the <Vg k="CONSOLE" /> first.
</ProductWrapper>

<ProductWrapper notAllowed="interactive-whiteboard">
![Create project in Agora Console](/images/common/create-project.svg)
</ProductWrapper>
<ProductWrapper notAllowed="interactive-whiteboard">![Create project in Agora Console](/images/common/create-project.svg)</ProductWrapper>

#### <Vg k="CONSOLE" />

<Link to="{{Global.AGORA_CONSOLE_URL}}"><Vg k="CONSOLE" /></Link> is the main dashboard where you manage your <Vg k="COMPANY" /> projects and services. <Vg k="CONSOLE" /> provides an intuitive interface for developers to query and manage their <Vg k="COMPANY" /> account. After registering an <Link to="{{Global.AGORA_CONSOLE_URL}}">Agora Account</Link>, you use the <Vg k="CONSOLE" /> to perform the following tasks:
<Link to="{{Global.AGORA_CONSOLE_URL}}"><Vg k="CONSOLE" /></Link> provides an intuitive interface for developers to query and manage their <Vg k="COMPANY" /> account. After registering an <Link to="{{Global.AGORA_CONSOLE_URL}}">Agora Account</Link>, you use the <Vg k="CONSOLE" /> to perform the following tasks:

- Manage the account
- Create and configure <Vg k="COMPANY" /> projects and services
- Get an App ID
- Manage members and roles
- Check call quality and usage
- Check bills and make payments
- Access product resources

#### <Vg k="COMPANY" /> Account Management

<Vg k="COMPANY" /> also provides RESTful APIs that you use to implement features such as creating a project and fetching usage numbers programmatically.
See [Agora account management](../reference/manage-agora-account) for details on how to manage all aspects of your <Vg k="COMPANY" /> account.
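
The RESTful APIs mentioned above can be exercised with a short script. Below is a minimal sketch, assuming the `GET https://api.agora.io/dev/v1/projects` endpoint and Basic authentication with a Customer ID/Secret pair; verify both against the current REST reference before relying on them.

```python
# Hedged sketch: list projects via Agora's RESTful API.
# Endpoint path and response fields are assumptions to verify.
import requests

CUSTOMER_ID = "<customer-id>"          # generated in Agora Console under RESTful API
CUSTOMER_SECRET = "<customer-secret>"  # keep server-side, never ship to clients

resp = requests.get(
    "https://api.agora.io/dev/v1/projects",
    auth=(CUSTOMER_ID, CUSTOMER_SECRET),  # HTTP Basic auth
    timeout=10,
)
resp.raise_for_status()
for project in resp.json().get("projects", []):
    # "vendor_key" is assumed to carry the project's App ID.
    print(project.get("name"), project.get("vendor_key"))
```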
16 changes: 3 additions & 13 deletions shared/common/core-concepts/app-certificate.mdx
@@ -1,15 +1,5 @@
#### App certificate
#### App Certificate

An App certificate is a string generated by <Vg k="CONSOLE" /> to enable token authentication. It is required for generating a <Vg k="VSDK" /> or <Vg k="MESS" /> authentication token.
An App Certificate is a unique key generated by the <Vg k="CONSOLE" /> to secure projects through token authentication. It is required, along with the App ID, to generate a token that proves authorization between your systems and <Vg k="COMPANY" />'s network. App Certificates are used to generate <Vg k="VSDK" /> or <Vg k="MESS" /> authentication tokens.

<ProductWrapper notAllowed={["extensions-marketplace","agora-analytics","video-calling", "voice-calling",
"interactive-live-streaming", "broadcast-streaming","signaling"]}>

To use your App certificate for setting up a token server, see [Create and run a token server.](../get-started/authentication-workflow)

</ProductWrapper>

<ProductWrapper product={["video-calling", "voice-calling", "interactive-live-streaming", "broadcast-streaming","signaling"]}>
To use your App certificate for setting up a token server, see [Create and run a token server.](../get-started/authentication-workflow)

</ProductWrapper>
App Certificates should be stored securely in your backend systems. If your App Certificate is compromised or to meet security compliance requirements, you can invalidate certificates and create new ones through the <Vg k='CONSOLE' />.
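
As an illustration of how an App Certificate is used, here is a minimal token-minting sketch based on the community `agora-token-builder` Python package; the package name, method signature, and role constant are assumptions to confirm against whichever token builder you deploy.

```python
# Hedged sketch: mint an RTC token from an App ID + App Certificate.
# Assumes `pip install agora-token-builder`; verify the signature in your version.
import time

from agora_token_builder import RtcTokenBuilder

APP_ID = "<app-id>"
APP_CERTIFICATE = "<app-certificate>"  # store securely in your backend

def build_rtc_token(channel_name: str, uid: int, ttl_seconds: int = 3600) -> str:
    expire_ts = int(time.time()) + ttl_seconds
    # Role 1 = publisher/broadcaster in this package's convention (assumed).
    return RtcTokenBuilder.buildTokenWithUid(
        APP_ID, APP_CERTIFICATE, channel_name, uid, 1, expire_ts
    )

print(build_rtc_token("demo", uid=123))
```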