// discord_voice native module wrapper (~337 lines, ~12 KiB, JavaScript)
const VoiceEngine = require('./discord_voice_'+process.platform+'.node');
|
|
const fs = require('fs');
|
|
const path = require('path');
|
|
const yargs = require('yargs');
|
|
|
|
// True when running inside Discord's Electron renderer process, detected via
// the DiscordNative preload bridge; falsy in the main/node process.
const isElectronRenderer =
  typeof window !== 'undefined' && window != null && window.DiscordNative && window.DiscordNative.isRenderer;

// Settings, feature registry, and the main-process argv come from the
// renderer bridge when available, otherwise from globals installed by the
// main process. NOTE(review): outside the renderer, argv is unavailable ([]).
const appSettings = isElectronRenderer ? window.DiscordNative.settings : global.appSettings;
const features = isElectronRenderer ? window.DiscordNative.features : global.features;
const mainArgv = isElectronRenderer ? window.DiscordNative.processUtils.getMainArgvSync() : [];
|
|
// Per-module data directory ("<moduleDataPath>/discord_voice"), or null when
// the renderer bridge cannot resolve it. Left undefined if resolution throws.
let dataDirectory;
try {
  const canResolvePath = isElectronRenderer && window.DiscordNative.fileManager.getModuleDataPathSync;
  dataDirectory = canResolvePath
    ? path.join(window.DiscordNative.fileManager.getModuleDataPathSync(), 'discord_voice')
    : null;
} catch (e) {
  console.error('Failed to get data directory: ', e);
}
|
|
|
|
// Release channel and audio-related settings, with safe defaults when the
// settings store is unavailable.
const releaseChannel = isElectronRenderer ? window.DiscordNative.app.getReleaseChannel() : '';
const useLegacyAudioDevice = appSettings ? appSettings.getSync('useLegacyAudioDevice') : false;
// Normalize the stored subsystem value to exactly 'legacy' or 'standard'.
const audioSubsystemSelected = appSettings
  ? appSettings.getSync('audioSubsystem') === 'legacy'
    ? 'legacy'
    : 'standard'
  : 'standard';
// Bug fix: this was `useLegacyAudioDevice || audioSubsystemSelected`, which
// produced the boolean `true` (not the string 'legacy') when the legacy audio
// device flag was set, breaking the `subsystem === audioSubsystem` comparison
// in setAudioSubsystem below and the value passed to VoiceEngine.initialize.
const audioSubsystem = useLegacyAudioDevice ? 'legacy' : audioSubsystemSelected;
const debugLogging = appSettings ? appSettings.getSync('debugLogging') : false;
|
|
|
|
// Parse the CLI flags this module cares about from the main process argv.
const argv = yargs(mainArgv.slice(1))
  .describe('log-level', 'Logging level.')
  .default('log-level', -1)
  .help('h')
  .alias('h', 'help')
  .exitProcess(false).argv;

const requestedLogLevel = argv['log-level'];
// -1 means "not specified on the command line": fall back to verbose (2)
// when debug logging is enabled in settings, otherwise stay at -1.
const logLevel = requestedLogLevel == -1 ? (debugLogging ? 2 : -1) : requestedLogLevel;
|
|
|
|
// Ensure the voice data directory exists before the engine initializes.
// Failure is non-fatal: the engine just runs without persistent module data.
if (dataDirectory != null) {
  try {
    fs.mkdirSync(dataDirectory, {recursive: true});
  } catch (e) {
    console.warn("Couldn't create voice data directory ", dataDirectory, ':', e);
  }
}
|
|
|
|
// When debug logging is enabled, mirror all console output into the native
// engine's log via VoiceEngine.consoleLog. The discordVoiceHooked flag makes
// this idempotent so re-evaluating the module doesn't stack wrappers.
if (debugLogging && console.discordVoiceHooked == null) {
  console.discordVoiceHooked = true;

  ['trace', 'debug', 'info', 'warn', 'error', 'log'].forEach((method) => {
    const originalLogFn = console[method];
    if (originalLogFn == null) {
      return;
    }

    console[method] = function (...args) {
      originalLogFn.apply(this, args);

      try {
        // Stringify defensively: toString()/JSON.stringify may throw on
        // exotic values, and logging must never crash the caller.
        VoiceEngine.consoleLog(method, JSON.stringify(args.map((v) => (v != null ? v.toString() : v))));
      } catch (e) {
        // Drop errors from toString()/stringify.
      }
    };
  });
}
|
|
|
|
// Features this build of the voice module supports on every platform.
const supportedFeatures = [
  'voice_panning',
  'voice_multiple_connections',
  'media_devices',
  'media_video',
  'debug_logging',
  'set_audio_device_by_id',
  'set_video_device_by_id',
  'loopback',
  'experiment_config',
  'remote_locus_network_control',
  'connection_replay',
  'simulcast',
  'direct_video',
];
for (const feature of supportedFeatures) {
  features.declareSupported(feature);
}
|
|
|
|
if (process.platform === 'win32') {
  // Features that depend on Windows-specific capture/hook support.
  const windowsOnlyFeatures = [
    'voice_legacy_subsystem',
    'soundshare',
    'wumpus_video',
    'hybrid_video',
    'elevated_hook',
    'soundshare_loopback',
    'screen_previews',
    'window_previews',
    'audio_debug_state',
    'video_effects',
  ];
  for (const feature of windowsOnlyFeatures) {
    features.declareSupported(feature);
  }
  // NOTE(jvass): currently there's no experimental encoders! Add this back if you
  // add one and want to re-enable the UI for them.
  // features.declareSupported('experimental_encoders');
}
|
|
|
|
/**
 * Wrap a native connection instance in a plain object that forwards each
 * supported method call, so callers never hold the native object directly.
 * @param {object} instance - native VoiceConnection/VoiceReplayConnection.
 * @returns {object} forwarding facade with the same method surface.
 */
function bindConnectionInstance(instance) {
  // Keep the list order identical to the original literal so the facade's
  // key enumeration order is unchanged.
  const forwardedMethods = [
    'destroy',
    'setTransportOptions',
    'setSelfMute',
    'setSelfDeafen',
    'mergeUsers',
    'destroyUser',
    'setLocalVolume',
    'setLocalMute',
    'setLocalPan',
    'setDisableLocalVideo',
    'setMinimumOutputDelay',
    'getEncryptionModes',
    'configureConnectionRetries',
    'setOnSpeakingCallback',
    'setOnSpeakingWhileMutedCallback',
    'setPingInterval',
    'setPingCallback',
    'setPingTimeoutCallback',
    'setRemoteUserSpeakingStatus',
    'setRemoteUserCanHavePriority',
    'setOnVideoCallback',
    'setVideoBroadcast',
    'setDesktopSource',
    'setDesktopSourceStatusCallback',
    'setOnDesktopSourceEnded',
    'setOnSoundshare',
    'setOnSoundshareEnded',
    'setOnSoundshareFailed',
    'setPTTActive',
    'getStats',
    'getFilteredStats',
    'startReplay',
  ];

  const bound = {};
  for (const method of forwardedMethods) {
    bound[method] = (...args) => instance[method](...args);
  }
  return bound;
}
|
|
|
|
// Expose the native transport factory under its public name.
VoiceEngine.createTransport = VoiceEngine._createTransport;

// In the renderer, decoded frames are allocated as window.ImageData so they
// can be drawn straight onto a canvas.
if (isElectronRenderer) {
  VoiceEngine.setImageDataAllocator((width, height) => new window.ImageData(width, height));
}
|
|
|
|
/**
 * Create a native voice connection and return a forwarding facade for it.
 * Optional trailing arguments are only forwarded when supplied; note that a
 * non-null `rids` forwards `experiments` as well (even if it is null), which
 * matches the original constructor-arity behavior.
 */
VoiceEngine.createVoiceConnection = function (audioSSRC, userId, address, port, onConnectCallback, experiments, rids) {
  const ctorArgs = [audioSSRC, userId, address, port, onConnectCallback];
  if (rids != null) {
    ctorArgs.push(experiments, rids);
  } else if (experiments != null) {
    ctorArgs.push(experiments);
  }
  return bindConnectionInstance(new VoiceEngine.VoiceConnection(...ctorArgs));
};
VoiceEngine.createOwnStreamConnection = VoiceEngine.createVoiceConnection;
|
|
|
|
/**
 * Create a replay connection for a recorded log.
 * @returns {object|null} forwarding facade, or null when there is no log.
 */
VoiceEngine.createReplayConnection = function (audioEngineId, callback, replayLog) {
  if (replayLog == null) {
    return null;
  }
  const replay = new VoiceEngine.VoiceReplayConnection(replayLog, audioEngineId, callback);
  return bindConnectionInstance(replay);
};
|
|
|
|
/**
 * Persist a new audio subsystem choice and relaunch the app to apply it.
 * No-op when settings are unavailable or the subsystem is unchanged.
 */
VoiceEngine.setAudioSubsystem = function (subsystem) {
  if (appSettings == null) {
    console.warn('Unable to access app settings.');
    return;
  }

  // TODO: With experiment controlling ADM selection, this may be incorrect since
  // audioSubsystem is read from settings (or default if does not exists)
  // and not the actual ADM used.
  const unchanged = subsystem === audioSubsystem;
  if (unchanged) {
    return;
  }

  appSettings.set('audioSubsystem', subsystem);
  appSettings.set('useLegacyAudioDevice', false);

  // The subsystem is picked at engine init, so a relaunch is required.
  if (!isElectronRenderer) {
    return;
  }
  window.DiscordNative.app.relaunch();
};
|
|
|
|
/**
 * Persist the debug-logging flag and relaunch the app to apply it.
 * No-op when settings are unavailable or the flag is unchanged.
 */
VoiceEngine.setDebugLogging = function (enable) {
  if (appSettings == null) {
    console.warn('Unable to access app settings.');
    return;
  }

  const unchanged = debugLogging === enable;
  if (unchanged) {
    return;
  }

  appSettings.set('debugLogging', enable);

  // The console hook and log level are installed at module load, so a
  // relaunch is required for the change to take effect.
  if (!isElectronRenderer) {
    return;
  }
  window.DiscordNative.app.relaunch();
};
|
|
|
|
// Report the debug-logging flag as read from settings at module load.
VoiceEngine.getDebugLogging = () => debugLogging;
|
|
|
|
// streamId -> Map(sinkId -> frameCallback) for all active video output sinks.
const videoStreams = {};

/**
 * Find the canvas element whose id is sinkId — in the main document first,
 * then in any popout window — and return its 2d context.
 * Returns null when no canvas is found or a context can't be created.
 */
const ensureCanvasContext = function (sinkId) {
  let canvas = document.getElementById(sinkId);
  if (canvas == null) {
    for (const popout of window.popouts.values()) {
      // Bug fix: the old `popout.document != null && popout.document.getElementById(sinkId)`
      // evaluated to `false` for a popout with no document, and `false != null`
      // is true — so `canvas` became `false` and canvas.getContext() threw.
      const element = popout.document != null ? popout.document.getElementById(sinkId) : null;
      if (element != null) {
        canvas = element;
        break;
      }
    }

    if (canvas == null) {
      return null;
    }
  }

  const context = canvas.getContext('2d');
  if (context == null) {
    console.log(`Failed to initialize context for sinkId ${sinkId}`);
    return null;
  }

  return context;
};
|
|
|
|
// [adill] NB: with context isolation it has become extremely costly (both memory & performance) to provide the image
// data directly to clients at any reasonably fast interval so we've replaced setVideoOutputSink with a direct canvas
// renderer via addVideoOutputSink
const setVideoOutputSink = VoiceEngine.setVideoOutputSink;
const signalVideoOutputSinkReady = VoiceEngine.signalVideoOutputSinkReady;

// [adill] NB: if you don't pass a frame callback setVideoOutputSink clears the sink
const clearVideoOutputSink = (streamId) => {
  setVideoOutputSink(streamId);
};

// Hide the raw native entry points; everyone must go through the
// addVideoOutputSink/removeVideoOutputSink wrappers below.
delete VoiceEngine.setVideoOutputSink;
delete VoiceEngine.signalVideoOutputSinkReady;
|
|
|
|
/**
 * Register frameCallback as a sink for streamId under sinkId. The first sink
 * for a stream subscribes to native frames; each incoming frame is fanned out
 * to every registered callback and then acknowledged so the engine sends the
 * next one.
 */
function addVideoOutputSinkInternal(sinkId, streamId, frameCallback) {
  let sinks = videoStreams[streamId];
  if (sinks == null) {
    sinks = new Map();
    videoStreams[streamId] = sinks;
  }

  if (sinks.size === 0) {
    console.log(`Subscribing to frames for streamId ${streamId}`);
    const onFrame = (imageData) => {
      // Re-read the map each frame: sinks may have been removed meanwhile.
      const activeSinks = videoStreams[streamId];
      if (activeSinks != null) {
        for (const callback of activeSinks.values()) {
          if (callback != null) {
            callback(imageData);
          }
        }
      }
      signalVideoOutputSinkReady(streamId);
    };
    setVideoOutputSink(streamId, onFrame, true);
  }

  sinks.set(sinkId, frameCallback);
}
|
|
|
|
/**
 * Attach a canvas-backed sink for streamId. Frames are drawn directly onto
 * the canvas whose id is sinkId; frameCallback (if given) receives only the
 * frame dimensions, never the pixel data.
 */
VoiceEngine.addVideoOutputSink = function (sinkId, streamId, frameCallback) {
  let context = null;

  addVideoOutputSinkInternal(sinkId, streamId, (imageData) => {
    if (context == null) {
      context = ensureCanvasContext(sinkId);
      if (context == null) {
        // Canvas not found yet — skip this frame and retry on the next one.
        return;
      }
    }

    if (frameCallback != null) {
      frameCallback(imageData.width, imageData.height);
    }

    // [adill] NB: Electron 9+ on macOS would show massive leaks in the the GPU helper process when a non-Discord
    // window completely occludes the Discord window. Adding this tiny readback ameliorates the issue. We tried WebGL
    // rendering which did not exhibit the issue, however, the context limit of 16 was too small to be a real
    // alternative.
    const leak = context.getImageData(0, 0, 1, 1);
    context.putImageData(imageData, 0, 0);
  });
};
|
|
|
|
/**
 * Detach the sink registered under sinkId for streamId. When the last sink
 * for a stream goes away, unsubscribe from native frames entirely.
 */
VoiceEngine.removeVideoOutputSink = function (sinkId, streamId) {
  const sinks = videoStreams[streamId];
  if (sinks == null) {
    return;
  }

  sinks.delete(sinkId);
  if (sinks.size !== 0) {
    return;
  }

  delete videoStreams[streamId];
  console.log(`Unsubscribing from frames for streamId ${streamId}`);
  clearVideoOutputSink(streamId);
};
|
|
|
|
// Monotonic counter for one-shot frame-grab sink ids.
let sinkId = 0;

/**
 * Resolve with the next decoded frame for streamId as
 * {width, height, data: Uint8ClampedArray}, or reject after a 5s timeout.
 */
VoiceEngine.getNextVideoOutputFrame = function (streamId) {
  const nextVideoFrameSinkId = `getNextVideoFrame_${++sinkId}`;

  return new Promise((resolve, reject) => {
    const timeout = setTimeout(() => {
      VoiceEngine.removeVideoOutputSink(nextVideoFrameSinkId, streamId);
      reject(new Error('getNextVideoOutputFrame timeout'));
    }, 5000);

    addVideoOutputSinkInternal(nextVideoFrameSinkId, streamId, (imageData) => {
      // Bug fix: clear the timer on success — previously it kept running and
      // fired a redundant removeVideoOutputSink (and a no-op reject) 5s later.
      clearTimeout(timeout);
      VoiceEngine.removeVideoOutputSink(nextVideoFrameSinkId, streamId);
      resolve({
        width: imageData.width,
        height: imageData.height,
        data: new Uint8ClampedArray(imageData.data.buffer),
      });
    });
  });
};
|
|
|
|
// Start the native engine with the subsystem, log level, and data directory
// resolved above, then export the wrapped engine as the module interface.
console.log(`Initializing voice engine with audio subsystem: ${audioSubsystem}`);
VoiceEngine.initialize({audioSubsystem, logLevel, dataDirectory});

module.exports = VoiceEngine;
|