Skip to content

Commit 6236d44

Browse files
sda-robNero-Hu
and authored
[AUTO] Generate comments by iris-doc (#1185)
Co-authored-by: Nero-Hu <Nero-Hu@users.noreply.github.com>
1 parent 1aaa454 commit 6236d44

File tree

5 files changed

+111
-2109
lines changed

5 files changed

+111
-2109
lines changed

ts/Private/AgoraBase.ts

Lines changed: 12 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -327,7 +327,7 @@ export enum ErrorCodeType {
327327
*/
328328
ErrInvalidUserId = 121,
329329
/**
330-
* @ignore
330+
* 122: Data streams decryption fails. The user might use an incorrect password to join the channel. Check the entered password, or tell the user to try rejoining the channel.
331331
*/
332332
ErrDatastreamDecryptionFailed = 122,
333333
/**
@@ -2208,7 +2208,7 @@ export enum LocalVideoStreamReason {
22082208
*/
22092209
LocalVideoStreamReasonScreenCaptureWindowRecoverFromHidden = 26,
22102210
/**
2211-
* @ignore
2211+
* 27: The window for screen capture has been restored from the minimized state.
22122212
*/
22132213
LocalVideoStreamReasonScreenCaptureWindowRecoverFromMinimized = 27,
22142214
/**
@@ -3867,7 +3867,7 @@ export enum AudioEffectPreset {
38673867
*/
38683868
RoomAcousticsVirtualSurroundSound = 0x02010900,
38693869
/**
3870-
* @ignore
3870+
* The audio effect of chorus. Agora recommends using this effect in chorus scenarios to enhance the sense of depth and dimension in the vocals.
38713871
*/
38723872
RoomAcousticsChorus = 0x02010d00,
38733873
/**
@@ -4005,9 +4005,13 @@ export enum HeadphoneEqualizerPreset {
40054005
*/
40064006
export class ScreenCaptureParameters {
40074007
/**
4008-
* The video encoding resolution of the shared screen stream. See VideoDimensions. The default value is 1920 × 1080, that is, 2,073,600 pixels. Agora uses the value of this parameter to calculate the charges. If the screen dimensions are different from the value of this parameter, Agora applies the following strategies for encoding. Suppose is set to 1920 × 1080:
4008+
* The video encoding resolution of the screen sharing stream. See VideoDimensions. The default value is 1920 × 1080, that is, 2,073,600 pixels. Agora uses the value of this parameter to calculate the charges. If the screen dimensions are different from the value of this parameter, Agora applies the following strategies for encoding. Suppose dimensions is set to 1920 × 1080:
40094009
* If the value of the screen dimensions is lower than that of dimensions, for example, 1000 × 1000 pixels, the SDK uses the screen dimensions, that is, 1000 × 1000 pixels, for encoding.
4010-
* If the value of the screen dimensions is higher than that of dimensions, for example, 2000 × 1500, the SDK uses the maximum value under with the aspect ratio of the screen dimension (4:3) for encoding, that is, 1440 × 1080.
4010+
* If the value of the screen dimensions is higher than that of dimensions, for example, 2000 × 1500, the SDK uses the maximum value under dimensions with the aspect ratio of the screen dimension (4:3) for encoding, that is, 1440 × 1080. When setting the encoding resolution in the scenario of sharing documents (ScreenScenarioDocument), choose one of the following two methods:
4011+
* If you require the best image quality, it is recommended to set the encoding resolution to be the same as the capture resolution.
4012+
* If you wish to achieve a relative balance between image quality, bandwidth, and system performance, then:
4013+
* When the capture resolution is greater than 1920 × 1080, it is recommended that the encoding resolution is not less than 1920 × 1080.
4014+
* When the capture resolution is less than 1920 × 1080, it is recommended that the encoding resolution is not less than 1280 × 720.
40114015
*/
40124016
dimensions?: VideoDimensions;
40134017
/**
@@ -4547,11 +4551,11 @@ export enum EncryptionErrorType {
45474551
*/
45484552
EncryptionErrorEncryptionFailure = 2,
45494553
/**
4550-
* @ignore
4554+
* 3: Data stream decryption error. Ensure that the receiver and the sender use the same encryption mode and key.
45514555
*/
45524556
EncryptionErrorDatastreamDecryptionFailure = 3,
45534557
/**
4554-
* @ignore
4558+
* 4: Data stream encryption error.
45554559
*/
45564560
EncryptionErrorDatastreamEncryptionFailure = 4,
45574561
}
@@ -4711,7 +4715,7 @@ export enum EarMonitoringFilterType {
47114715
*/
47124716
EarMonitoringFilterNoiseSuppression = 1 << 2,
47134717
/**
4714-
* @ignore
4718+
* 1<<15: Reuse the audio filter that has been processed on the sending end for in-ear monitoring. This enumerator reduces CPU usage while increasing in-ear monitoring latency, which is suitable for latency-tolerant scenarios requiring low CPU consumption.
47154719
*/
47164720
EarMonitoringFilterReusePostProcessingFilter = 1 << 15,
47174721
}

ts/Private/AgoraMediaBase.ts

Lines changed: 41 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -951,6 +951,8 @@ export interface IAudioFrameObserver extends IAudioFrameObserverBase {
951951
/**
952952
* Retrieves the audio frame of a specified user before mixing.
953953
*
954+
* Due to framework limitations, this callback does not support sending processed audio data back to the SDK.
955+
*
954956
* @param channelId The channel ID.
955957
* @param uid The user ID of the specified user.
956958
* @param audioFrame The raw audio data. See AudioFrame.
@@ -1060,9 +1062,7 @@ export interface IVideoFrameObserver {
10601062
/**
10611063
* Occurs each time the SDK receives a video frame captured by local devices.
10621064
*
1063-
* After you successfully register the video frame observer, the SDK triggers this callback each time it receives a video frame. In this callback, you can get the video data captured by local devices. You can then pre-process the data according to your scenarios. Once the pre-processing is complete, you can directly modify videoFrame in this callback, and set the return value to true to send the modified video data to the SDK.
1064-
* The video data that this callback gets has not been pre-processed such as watermarking, cropping, and rotating.
1065-
* If the video data type you get is RGBA, the SDK does not support processing the data of the alpha channel.
1065+
* You can get raw video data collected by the local device through this callback.
10661066
*
10671067
* @param sourceType Video source types, including cameras, screens, or media player. See VideoSourceType.
10681068
* @param videoFrame The video frame. See VideoFrame. The default value of the video frame data format obtained through this callback is as follows:
@@ -1078,6 +1078,7 @@ export interface IVideoFrameObserver {
10781078
* Occurs each time the SDK receives a video frame before encoding.
10791079
*
10801080
* After you successfully register the video frame observer, the SDK triggers this callback each time it receives a video frame. In this callback, you can get the video data before encoding and then process the data according to your particular scenarios. After processing, you can send the processed video data back to the SDK in this callback.
1081+
* Due to framework limitations, this callback does not support sending processed video data back to the SDK.
10811082
* The video data that this callback gets has been preprocessed, with its content cropped and rotated, and the image enhanced.
10821083
*
10831084
* @param sourceType The type of the video source. See VideoSourceType.
@@ -1100,6 +1101,7 @@ export interface IVideoFrameObserver {
11001101
*
11011102
* After you successfully register the video frame observer, the SDK triggers this callback each time it receives a video frame. In this callback, you can get the video data sent from the remote end before rendering, and then process it according to the particular scenarios.
11021103
* If the video data type you get is RGBA, the SDK does not support processing the data of the alpha channel.
1104+
* Due to framework limitations, this callback does not support sending processed video data back to the SDK.
11031105
*
11041106
* @param channelId The channel ID.
11051107
* @param remoteUid The user ID of the remote user who sends the current video frame.
@@ -1232,11 +1234,45 @@ export class MediaRecorderConfiguration {
12321234
}
12331235

12341236
/**
1235-
* @ignore
1237+
* Facial information observer.
1238+
*
1239+
* You can call registerFaceInfoObserver to register or unregister the IFaceInfoObserver object.
12361240
*/
12371241
export interface IFaceInfoObserver {
12381242
/**
1239-
* @ignore
1243+
* Occurs when the facial information processed by speech driven extension is received.
1244+
*
1245+
* @param outFaceInfo Output parameter, the JSON string of the facial information processed by the voice driver plugin, including the following fields:
1246+
* faces: Object sequence. The collection of facial information, with each face corresponding to an object.
1247+
* blendshapes: Object. The collection of face capture coefficients, named according to ARkit standards, with each key-value pair representing a blendshape coefficient. The blendshape coefficient is a floating point number with a range of [0.0, 1.0].
1248+
* rotation: Object sequence. The rotation of the head, which includes the following three key-value pairs, with values as floating point numbers ranging from -180.0 to 180.0:
1249+
* pitch: Head pitch angle. A positive value means looking down, while a negative value means looking up.
1250+
* yaw: Head yaw angle. A positive value means turning left, while a negative value means turning right.
1251+
* roll: Head roll angle. A positive value means tilting to the right, while a negative value means tilting to the left.
1252+
* timestamp: String. The timestamp of the output result, in milliseconds. Here is an example of JSON:
1253+
* {
1254+
* "faces":[{
1255+
* "blendshapes":{
1256+
* "eyeBlinkLeft":0.9, "eyeLookDownLeft":0.0, "eyeLookInLeft":0.0, "eyeLookOutLeft":0.0, "eyeLookUpLeft":0.0,
1257+
* "eyeSquintLeft":0.0, "eyeWideLeft":0.0, "eyeBlinkRight":0.0, "eyeLookDownRight":0.0, "eyeLookInRight":0.0,
1258+
* "eyeLookOutRight":0.0, "eyeLookUpRight":0.0, "eyeSquintRight":0.0, "eyeWideRight":0.0, "jawForward":0.0,
1259+
* "jawLeft":0.0, "jawRight":0.0, "jawOpen":0.0, "mouthClose":0.0, "mouthFunnel":0.0, "mouthPucker":0.0,
1260+
* "mouthLeft":0.0, "mouthRight":0.0, "mouthSmileLeft":0.0, "mouthSmileRight":0.0, "mouthFrownLeft":0.0,
1261+
* "mouthFrownRight":0.0, "mouthDimpleLeft":0.0, "mouthDimpleRight":0.0, "mouthStretchLeft":0.0, "mouthStretchRight":0.0,
1262+
* "mouthRollLower":0.0, "mouthRollUpper":0.0, "mouthShrugLower":0.0, "mouthShrugUpper":0.0, "mouthPressLeft":0.0,
1263+
* "mouthPressRight":0.0, "mouthLowerDownLeft":0.0, "mouthLowerDownRight":0.0, "mouthUpperUpLeft":0.0, "mouthUpperUpRight":0.0,
1264+
* "browDownLeft":0.0, "browDownRight":0.0, "browInnerUp":0.0, "browOuterUpLeft":0.0, "browOuterUpRight":0.0,
1265+
* "cheekPuff":0.0, "cheekSquintLeft":0.0, "cheekSquintRight":0.0, "noseSneerLeft":0.0, "noseSneerRight":0.0,
1266+
* "tongueOut":0.0
1267+
* },
1268+
* "rotation":{"pitch":30.0, "yaw":25.5, "roll":-15.5},
1269+
*
1270+
* }],
1271+
* "timestamp":"654879876546"
1272+
* }
1273+
*
1274+
* @returns
1275+
* true : Facial information JSON parsing successful. false : Facial information JSON parsing failed.
12401276
*/
12411277
onFaceInfo?(outFaceInfo: string): void;
12421278
}

ts/Private/IAgoraMediaEngine.ts

Lines changed: 18 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -100,7 +100,17 @@ export abstract class IMediaEngine {
100100
): number;
101101

102102
/**
103-
* @ignore
103+
* Registers a facial information observer.
104+
*
105+
* You can call this method to register the onFaceInfo callback to receive the facial information processed by Agora speech driven extension. When calling this method to register a facial information observer, you can register callbacks in the IFaceInfoObserver class as needed. After successfully registering the facial information observer, the SDK triggers the callback you have registered when it captures the facial information converted by the speech driven extension.
106+
* Ensure that you call this method before joining a channel.
107+
* Before calling this method, you need to make sure that the speech driven extension has been enabled by calling enableExtension.
108+
*
109+
* @param observer Facial information observer, see IFaceInfoObserver.
110+
*
111+
* @returns
112+
* 0: Success.
113+
* < 0: Failure.
104114
*/
105115
abstract registerFaceInfoObserver(observer: IFaceInfoObserver): number;
106116

@@ -313,7 +323,13 @@ export abstract class IMediaEngine {
313323
): number;
314324

315325
/**
316-
* @ignore
326+
* Unregisters a facial information observer.
327+
*
328+
* @param observer Facial information observer, see IFaceInfoObserver.
329+
*
330+
* @returns
331+
* 0: Success.
332+
* < 0: Failure.
317333
*/
318334
abstract unregisterFaceInfoObserver(observer: IFaceInfoObserver): number;
319335
}

0 commit comments

Comments
 (0)