Table of Contents

Class NewAudioStreamRequest

Namespace
LiveKit.Proto

Create a new AudioStream. An AudioStream is used to receive audio frames from a track.

public sealed class NewAudioStreamRequest : Google.Protobuf.IMessage<NewAudioStreamRequest>, Google.Protobuf.IBufferMessage
Inheritance
NewAudioStreamRequest
Implements
IMessage<NewAudioStreamRequest>, IBufferMessage
Extension Methods

Constructors

NewAudioStreamRequest()

public NewAudioStreamRequest()

NewAudioStreamRequest(NewAudioStreamRequest)

public NewAudioStreamRequest(NewAudioStreamRequest other)

Parameters

other NewAudioStreamRequest

Fields

AudioFilterModuleIdFieldNumber

Field number for the "audio_filter_module_id" field.

public const int AudioFilterModuleIdFieldNumber = 5

Field Value

int

AudioFilterOptionsFieldNumber

Field number for the "audio_filter_options" field.

public const int AudioFilterOptionsFieldNumber = 6

Field Value

int

FrameSizeMsFieldNumber

Field number for the "frame_size_ms" field.

public const int FrameSizeMsFieldNumber = 7

Field Value

int

NumChannelsFieldNumber

Field number for the "num_channels" field.

public const int NumChannelsFieldNumber = 4

Field Value

int

QueueSizeFramesFieldNumber

Field number for the "queue_size_frames" field.

public const int QueueSizeFramesFieldNumber = 8

Field Value

int

SampleRateFieldNumber

Field number for the "sample_rate" field.

public const int SampleRateFieldNumber = 3

Field Value

int

TrackHandleFieldNumber

Field number for the "track_handle" field.

public const int TrackHandleFieldNumber = 1

Field Value

int

TypeFieldNumber

Field number for the "type" field.

public const int TypeFieldNumber = 2

Field Value

int

Properties

AudioFilterModuleId

Unique identifier previously passed in a LoadAudioFilterPluginRequest.

public string AudioFilterModuleId { get; set; }

Property Value

string

AudioFilterOptions

public string AudioFilterOptions { get; set; }

Property Value

string

Descriptor

public static Google.Protobuf.Reflection.MessageDescriptor Descriptor { get; }

Property Value

MessageDescriptor

FrameSizeMs

public uint FrameSizeMs { get; set; }

Property Value

uint

HasAudioFilterModuleId

Gets whether the "audio_filter_module_id" field is set

public bool HasAudioFilterModuleId { get; }

Property Value

bool

HasAudioFilterOptions

Gets whether the "audio_filter_options" field is set

public bool HasAudioFilterOptions { get; }

Property Value

bool

HasFrameSizeMs

Gets whether the "frame_size_ms" field is set

public bool HasFrameSizeMs { get; }

Property Value

bool

HasNumChannels

Gets whether the "num_channels" field is set

public bool HasNumChannels { get; }

Property Value

bool

HasQueueSizeFrames

Gets whether the "queue_size_frames" field is set

public bool HasQueueSizeFrames { get; }

Property Value

bool

HasSampleRate

Gets whether the "sample_rate" field is set

public bool HasSampleRate { get; }

Property Value

bool

HasTrackHandle

Gets whether the "track_handle" field is set

public bool HasTrackHandle { get; }

Property Value

bool

HasType

Gets whether the "type" field is set

public bool HasType { get; }

Property Value

bool

NumChannels

public uint NumChannels { get; set; }

Property Value

uint

Parser

public static Google.Protobuf.MessageParser<NewAudioStreamRequest> Parser { get; }

Property Value

MessageParser<NewAudioStreamRequest>

QueueSizeFrames

Maximum number of queued WebRTC sink frames. Each frame is typically 10 ms of decoded PCM audio on the receive path. Omit this field to use the default bounded queue size of 10 frames. Set it to 0 to request unbounded buffering.

If your application consumes both audio and video, keep the queue sizing strategy coordinated across both streams. Using a much larger queue, or unbounded buffering, for only one of them can increase end-to-end latency for that stream and cause audio/video drift.

public uint QueueSizeFrames { get; set; }

Property Value

uint

SampleRate

public uint SampleRate { get; set; }

Property Value

uint

TrackHandle

public ulong TrackHandle { get; set; }

Property Value

ulong

Type

public AudioStreamType Type { get; set; }

Property Value

AudioStreamType

Methods

CalculateSize()

public int CalculateSize()

Returns

int

ClearAudioFilterModuleId()

Clears the value of the "audio_filter_module_id" field

public void ClearAudioFilterModuleId()

ClearAudioFilterOptions()

Clears the value of the "audio_filter_options" field

public void ClearAudioFilterOptions()

ClearFrameSizeMs()

Clears the value of the "frame_size_ms" field

public void ClearFrameSizeMs()

ClearNumChannels()

Clears the value of the "num_channels" field

public void ClearNumChannels()

ClearQueueSizeFrames()

Clears the value of the "queue_size_frames" field

public void ClearQueueSizeFrames()

ClearSampleRate()

Clears the value of the "sample_rate" field

public void ClearSampleRate()

ClearTrackHandle()

Clears the value of the "track_handle" field

public void ClearTrackHandle()

ClearType()

Clears the value of the "type" field

public void ClearType()

Clone()

public NewAudioStreamRequest Clone()

Returns

NewAudioStreamRequest

Equals(NewAudioStreamRequest)

public bool Equals(NewAudioStreamRequest other)

Parameters

other NewAudioStreamRequest

Returns

bool

Equals(object)

public override bool Equals(object other)

Parameters

other object

Returns

bool

GetHashCode()

public override int GetHashCode()

Returns

int

MergeFrom(CodedInputStream)

public void MergeFrom(Google.Protobuf.CodedInputStream input)

Parameters

input CodedInputStream

MergeFrom(NewAudioStreamRequest)

public void MergeFrom(NewAudioStreamRequest other)

Parameters

other NewAudioStreamRequest

ToString()

public override string ToString()

Returns

string

WriteTo(CodedOutputStream)

public void WriteTo(Google.Protobuf.CodedOutputStream output)

Parameters

output CodedOutputStream