- initial import

This commit is contained in:
2018-06-05 11:05:37 +03:00
commit e1a4931375
4673 changed files with 1383093 additions and 0 deletions

View File

@@ -0,0 +1,583 @@
#include "Audio_Android.h"
#include "../helper/HL_Sync.h"
#include "../helper/HL_Log.h"
#include <mutex>
#include <iostream>
#include "../helper/HL_String.h"
#ifdef TARGET_ANDROID
#define LOG_SUBSYSTEM "Audio"
using namespace Audio;
// -------------------- AndroidEnumerator -----------------------------
// Android exposes exactly one logical audio device, so the enumerator keeps no state.
AndroidEnumerator::AndroidEnumerator()
{}
// Nothing to release - open()/close() are no-ops as well.
AndroidEnumerator::~AndroidEnumerator()
{}
// The single device (index 0) is always the default one.
int AndroidEnumerator::indexOfDefaultDevice()
{
return 0;
}
// There is always exactly one enumerable device on Android.
int AndroidEnumerator::count()
{
return 1;
}
// Device id is fixed; 'index' is ignored because count() == 1.
int AndroidEnumerator::idAt(int index)
{
return 0;
}
// Fixed human-readable name; 'index' is ignored because count() == 1.
std::string AndroidEnumerator::nameAt(int index)
{
return "Audio";
}
// No enumeration session is needed on Android.
void AndroidEnumerator::open(int direction)
{}
// Matching no-op for open().
void AndroidEnumerator::close()
{}
// -----------------------
// Members are initialized in-class (see header); real work happens in internalOpen().
OpenSLEngine::OpenSLEngine()
{}
// Does not destroy the native engine - destruction relies on every open()
// being matched by a close() (see the usage-counting contract in the header).
OpenSLEngine::~OpenSLEngine()
{}
// Reference-counted open: only the first user actually creates the
// native OpenSL engine object.
void OpenSLEngine::open()
{
  std::lock_guard<std::mutex> guard(mMutex);
  mUsageCounter++;
  if (mUsageCounter == 1)
    internalOpen();
}
// Reference-counted close: the native engine is destroyed when the last
// user leaves.
// Fix: an unmatched close() used to drive mUsageCounter negative, after
// which the next open() incremented it to 0 and never re-created the
// engine. Guard against underflow so spurious close() calls are ignored.
void OpenSLEngine::close()
{
  std::unique_lock<std::mutex> l(mMutex);
  if (mUsageCounter > 0 && --mUsageCounter == 0)
    internalClose();
}
#define CHECK_OPENSLES_ERROR if (resultCode != SL_RESULT_SUCCESS) throw Exception(ERR_OPENSLES, (int)resultCode)
// Create and realize the global OpenSL ES engine object and cache its
// SL_IID_ENGINE interface. Throws Exception(ERR_OPENSLES, code) on failure.
// Called with mMutex held, only when the usage counter goes 0 -> 1.
void OpenSLEngine::internalOpen()
{
SLresult resultCode;
// Instantiate OpenSL ES engine object
resultCode = slCreateEngine(&mEngineObject, 0, nullptr, 0, nullptr, nullptr);
CHECK_OPENSLES_ERROR;
// Bring it online; SL_BOOLEAN_FALSE requests synchronous realization
resultCode = (*mEngineObject)->Realize(mEngineObject, SL_BOOLEAN_FALSE);
CHECK_OPENSLES_ERROR;
// Get the engine interface used to create recorders/players/mixers
resultCode = (*mEngineObject)->GetInterface(mEngineObject, SL_IID_ENGINE, &mEngineInterface);
CHECK_OPENSLES_ERROR;
ICELogInfo(<< "OpenSL engine object created.");
}
// Destroy the native engine object. Safe to call when it was never created.
// Called with mMutex held, only when the usage counter goes 1 -> 0.
void OpenSLEngine::internalClose()
{
if (mEngineObject != nullptr)
{
ICELogInfo(<< "Destroy OpenSL engine object.");
(*mEngineObject)->Destroy(mEngineObject);
mEngineObject = nullptr;
mEngineInterface = nullptr;
}
}
// Returns the cached engine interface; nullptr when the engine is not open.
SLEngineItf OpenSLEngine::getNativeEngine() const
{
return mEngineInterface;
}
// Process-wide singleton shared by all input/output devices.
static OpenSLEngine OpenSLEngineInstance;
OpenSLEngine& OpenSLEngine::instance()
{
return OpenSLEngineInstance;
}
// --------------- Input implementation ----------------
// 'devId' is ignored - Android exposes a single capture device.
AndroidInputDevice::AndroidInputDevice(int devId)
{}
// NOTE(review): unlike AndroidOutputDevice, this destructor does not call
// close(); an input left open at destruction leaks the recorder object and
// an engine reference - confirm callers always close() first.
AndroidInputDevice::~AndroidInputDevice()
{}
// Sampling rates to probe, in preference order. Column 0 is the OpenSL rate
// constant (expressed in milliHertz), column 1 the same rate in Hertz.
// Fix: the 11.025 kHz row carried 10025 instead of 11025, so a device opened
// at that rate would report and resample with a wrong Format.
static int RateToProbe[12][2] = {
  { SL_SAMPLINGRATE_16, 16000 },
  { SL_SAMPLINGRATE_8, 8000 },
  { SL_SAMPLINGRATE_32, 32000 },
  { SL_SAMPLINGRATE_44_1, 44100 },
  { SL_SAMPLINGRATE_11_025, 11025 },
  { SL_SAMPLINGRATE_22_05, 22050 },
  { SL_SAMPLINGRATE_24, 24000 },
  { SL_SAMPLINGRATE_48, 48000 },
  { SL_SAMPLINGRATE_64, 64000 },
  { SL_SAMPLINGRATE_88_2, 88200 },
  { SL_SAMPLINGRATE_96, 96000 },
  { SL_SAMPLINGRATE_192, 192000} };
// Open the capture device. Acquires the shared OpenSL engine (reference
// counted, matched by close()) and probes RateToProbe rates in order until
// the recorder can be created. Returns true on success; mActive mirrors it.
bool AndroidInputDevice::open()
{
if (active())
return true;
OpenSLEngine::instance().open();
// Probe few sampling rates
bool opened = false;
for (int rateIndex = 0; rateIndex < 12 && !opened; rateIndex++)
{
try
{
internalOpen(RateToProbe[rateIndex][0], RateToProbe[rateIndex][1]);
mDeviceRate = RateToProbe[rateIndex][1];
ICELogInfo(<< "Input Opened with rate " << mDeviceRate << " and rate index " << rateIndex);
opened = mDeviceRate != 0;
if (!opened)
internalClose();
}
catch(...)
{
// This rate is not supported - release partial state and try the next one
opened = false;
internalClose();
}
}
mActive = opened;
return opened;
}
// Close the recorder and drop the engine reference.
void AndroidInputDevice::close()
{
// There is no check for active() value because close() can be called to cleanup after bad open() call.
internalClose();
OpenSLEngine::instance().close();
mActive = false;
}
// Mono at the rate the recorder was actually opened with.
Format AndroidInputDevice::getFormat()
{
return Format(mDeviceRate, 1);
}
// True between a successful open() and the next close().
bool AndroidInputDevice::active() const
{
return mActive;
}
// Fake (silence-producing) mode is not supported on Android input.
bool AndroidInputDevice::fakeMode()
{
return false;
}
// Intentionally a no-op; see fakeMode().
void AndroidInputDevice::setFakeMode(bool fakemode)
{}
// Blocking read of one AUDIO_MIC_BUFFER_SIZE chunk of SDK-rate audio.
// NOTE(review): nothing in this file ever notifies mDataCondVar or fills
// mSdkRateCache (the resampling path in handleCallback() is commented out),
// so this wait may block forever - confirm against the rest of the project.
int AndroidInputDevice::readBuffer(void* buffer)
{
std::unique_lock<std::mutex> l(mMutex);
while (mSdkRateCache.filled() < AUDIO_MIC_BUFFER_SIZE)
{
mDataCondVar.wait(l);
}
return mSdkRateCache.read(buffer, AUDIO_MIC_BUFFER_SIZE);
}
// Build the full OpenSL recording chain for one candidate rate:
// audio source (default mic) -> PCM format -> Android simple buffer queue,
// then register DeviceCallback and start recording.
// 'rateCode' is the SL_SAMPLINGRATE_* constant, 'rate' the same value in Hz.
// Throws Exception(ERR_OPENSLES, code) on any OpenSL failure; caller cleans
// up via internalClose().
void AndroidInputDevice::internalOpen(int rateCode, int rate)
{
SLresult resultCode = 0;
SLuint32 nrOfChannels = 1;
// Prepare audio source
SLDataLocator_IODevice devDescription = { SL_DATALOCATOR_IODEVICE, SL_IODEVICE_AUDIOINPUT, SL_DEFAULTDEVICEID_AUDIOINPUT, NULL};
SLDataSource audioSource = { &devDescription, NULL };
// Speaker position mask must match the channel count
SLuint32 speakersFlags = nrOfChannels > 1 ? (SL_SPEAKER_FRONT_LEFT | SL_SPEAKER_FRONT_RIGHT) : SL_SPEAKER_FRONT_CENTER;
// Buffer queue with 2 slots (double buffering)
SLDataLocator_AndroidSimpleBufferQueue queueDescription = { SL_DATALOCATOR_ANDROIDSIMPLEBUFFERQUEUE, 2 };
// 16-bit little-endian PCM at the probed rate
SLDataFormat_PCM formatDescription = { SL_DATAFORMAT_PCM, nrOfChannels, (SLuint32)rateCode, SL_PCMSAMPLEFORMAT_FIXED_16,
SL_PCMSAMPLEFORMAT_FIXED_16, (SLuint32)speakersFlags, SL_BYTEORDER_LITTLEENDIAN };
SLDataSink audioSink = { &queueDescription, &formatDescription };
// Create recorder
// Do not forget about RECORD_AUDIO permission
const SLInterfaceID interfacesList[2] = { SL_IID_ANDROIDSIMPLEBUFFERQUEUE, SL_IID_ANDROIDCONFIGURATION };
const SLboolean interfacesRequirements[2] = { SL_BOOLEAN_TRUE, SL_BOOLEAN_TRUE };
if (*OpenSLEngine::instance().getNativeEngine() == nullptr)
throw Exception(ERR_OPENSLES, -1);
resultCode = (*OpenSLEngine::instance().getNativeEngine())->CreateAudioRecorder(
OpenSLEngine::instance().getNativeEngine(),
&mRecorderObject, &audioSource, &audioSink, 2, interfacesList, interfacesRequirements);
CHECK_OPENSLES_ERROR;
// Obtain stream type configuration interface (must be fetched before Realize)
resultCode = (*mRecorderObject)->GetInterface(mRecorderObject, SL_IID_ANDROIDCONFIGURATION, &mAndroidCfg);
CHECK_OPENSLES_ERROR;
// Now audio recorder goes to real world
resultCode = (*mRecorderObject)->Realize(mRecorderObject, SL_BOOLEAN_FALSE);
CHECK_OPENSLES_ERROR;
// Get recorder interface
resultCode = (*mRecorderObject)->GetInterface(mRecorderObject, SL_IID_RECORD, &mRecorderInterface);
CHECK_OPENSLES_ERROR;
// Now buffer queue interface...
resultCode = (*mRecorderObject)->GetInterface(mRecorderObject, SL_IID_ANDROIDSIMPLEBUFFERQUEUE, &mRecorderBufferInterface);
CHECK_OPENSLES_ERROR;
// Resampler is needed to provide SDK's rate
mResampler = std::make_shared<Resampler>();
mResampler->start(nrOfChannels, rate, AUDIO_SAMPLERATE);
// Allocate recorder buffer size: AUDIO_MIC_BUFFER_LENGTH ms of 16-bit mono at native rate
mBufferSize = (AUDIO_MIC_BUFFER_LENGTH / 10) * (rate / 100) * 2;
mRecorderBuffer.setCapacity(mBufferSize * AUDIO_MIC_BUFFER_COUNT);
mRecorderBufferIndex = 0;
// Setup data consuming callback
resultCode = (*mRecorderBufferInterface)->RegisterCallback(mRecorderBufferInterface, DeviceCallback, (void*)this);
CHECK_OPENSLES_ERROR;
// Pre-queue every slot of the circular recorder buffer
for (int i=0; i<AUDIO_MIC_BUFFER_COUNT; i++)
(*mRecorderBufferInterface)->Enqueue(mRecorderBufferInterface, mRecorderBuffer.data() + i * mBufferSize, mBufferSize);
// Start finally
resultCode = (*mRecorderInterface)->SetRecordState(mRecorderInterface, SL_RECORDSTATE_RECORDING);
CHECK_OPENSLES_ERROR;
}
// Tear down the recorder. Safe on a never-opened or partially-opened device
// (all interface pointers are reset; the stop sequence runs only when active).
void AndroidInputDevice::internalClose()
{
if (!mRecorderObject)
return;
if (*mRecorderObject)
{
if (active())
{
// Stop recording
(*mRecorderInterface)->SetRecordState(mRecorderInterface, SL_RECORDSTATE_STOPPED);
// Poll until OpenSL really leaves the recording state
SLuint32 state = SL_RECORDSTATE_STOPPED;
do
{
(*mRecorderInterface)->GetRecordState(mRecorderInterface, &state);
SyncHelper::delay(1);
}
while (state == SL_RECORDSTATE_RECORDING);
}
(*mRecorderObject)->Destroy(mRecorderObject);
}
mRecorderObject = nullptr;
mRecorderInterface = nullptr;
mRecorderBufferInterface = nullptr;
mAndroidCfg = nullptr;
}
// Buffer-queue callback body, invoked on OpenSL's thread whenever one
// recorder slot is filled. Forwards the raw (native-rate) slot to the
// connected consumer, re-queues the slot and advances the ring index.
void AndroidInputDevice::handleCallback(SLAndroidSimpleBufferQueueItf bq)
{
std::unique_lock<std::mutex> l(mMutex);
// Send data to AudioPair
if (mConnection)
mConnection->onMicData(getFormat(), mRecorderBuffer.data() + mRecorderBufferIndex * mBufferSize, mBufferSize);
/*
// Send audio to cache with native sample rate
mDeviceRateCache.add(mRecorderBuffer.data() + mRecorderBufferIndex * mBufferSize, mBufferSize);
// Check if there is enough data (10 ms) to send
int tenMsSize = (int)Format(mDeviceRate, 1).sizeFromTime(10);
while (mDeviceRateCache.filled() >= tenMsSize)
{
char* resampled = (char*)alloca(Format().sizeFromTime(10));
int processed = 0;
int outlen = mResampler->processBuffer(mDeviceRateCache.data(), tenMsSize, processed, resampled, Format().sizeFromTime(10));
if (outlen > 0)
mSdkRateCache.add(resampled, (int)Format().sizeFromTime(10));
mDeviceRateCache.erase(tenMsSize);
}
// Tell about data
while (mSdkRateCache.filled() >= AUDIO_MIC_BUFFER_SIZE)
{
if (mConnection)
mConnection->onMicData(Format(), mSdkRateCache.data(), AUDIO_MIC_BUFFER_SIZE);
mSdkRateCache.erase(AUDIO_MIC_BUFFER_SIZE);
}
*/
// Re-enqueue used buffer
(*mRecorderBufferInterface)->Enqueue(mRecorderBufferInterface, mRecorderBuffer.data() + mRecorderBufferIndex * mBufferSize, mBufferSize);
mRecorderBufferIndex++;
mRecorderBufferIndex %= AUDIO_MIC_BUFFER_COUNT;
}
// Static trampoline registered with the OpenSL buffer queue. Exceptions must
// never escape into OpenSL's thread, hence the catch-all.
void AndroidInputDevice::DeviceCallback(SLAndroidSimpleBufferQueueItf bq, void *context)
{
  if (!context)
    return;
  try
  {
    static_cast<AndroidInputDevice*>(context)->handleCallback(bq);
  }
  catch(...)
  {}
}
// ------------ AndroidOutputDevice -----------------
// 'devId' is ignored - Android exposes a single playback device.
AndroidOutputDevice::AndroidOutputDevice(int devId)
{
ICELogDebug(<< "Creating AndroidOutputDevice. This is: " << StringHelper::toHex(this));
}
// Ensure the player/mixer are torn down even if the owner forgot to close().
AndroidOutputDevice::~AndroidOutputDevice()
{
ICELogDebug(<< "Deleting AndroidOutputDevice.");
close();
}
// Open the playback chain, probing RateToProbe rates in order.
// NOTE(review): unlike AndroidInputDevice::open(), this never calls
// OpenSLEngine::instance().open(), so it relies on the engine having been
// opened elsewhere - confirm. Also the catch below does not call
// internalClose(), so objects partially created by a failed probe may leak
// into the next iteration - confirm intended.
bool AndroidOutputDevice::open()
{
std::unique_lock<std::mutex> l(mMutex);
bool opened = false;
for (int rateIndex = 0; rateIndex < 12 && !opened; rateIndex++)
{
try
{
internalOpen(RateToProbe[rateIndex][0], RateToProbe[rateIndex][1], true);
opened = true;
mDeviceRate = RateToProbe[rateIndex][1];
ICELogCritical(<< "Output opened with rate " << mDeviceRate << " and index " << rateIndex);
}
catch(...)
{
opened = false;
}
}
if (opened)
ICELogInfo(<< "Speaker opened on rate " << mDeviceRate);
return opened;
}
// Thread-safe wrapper around internalClose(); safe to call when not open.
void AndroidOutputDevice::close()
{
std::unique_lock<std::mutex> l(mMutex);
internalClose();
}
// Mono at the rate the player was actually opened with (0 before open()).
Format AndroidOutputDevice::getFormat()
{
return Format(mDeviceRate, 1);
}
// Fake (silent) mode is not supported on Android output.
bool AndroidOutputDevice::fakeMode()
{
return false;
}
// Intentionally a no-op; see fakeMode().
void AndroidOutputDevice::setFakeMode(bool fakemode)
{
}
// Build the full OpenSL playback chain for one candidate rate:
// buffer queue source -> PCM format -> output mix sink, configure the Android
// stream type (voice vs media), register DeviceCallback, pre-queue the play
// buffers and start playback.
// 'rateId' is the SL_SAMPLINGRATE_* constant, 'rate' the same value in Hz,
// 'voice' selects SL_ANDROID_STREAM_VOICE over SL_ANDROID_STREAM_MEDIA.
// Throws Exception(ERR_OPENSLES, code) on any required-step failure.
void AndroidOutputDevice::internalOpen(int rateId, int rate, bool voice)
{
mInShutdown = false;
SLresult resultCode;
SLuint32 channels = 1;
// Double-buffered queue feeding the player
SLDataLocator_AndroidSimpleBufferQueue queue_desc = { SL_DATALOCATOR_ANDROIDSIMPLEBUFFERQUEUE, 2 };
const SLInterfaceID interfacesList[] = { SL_IID_VOLUME };
const SLboolean interfaceRequirements[] = { SL_BOOLEAN_FALSE };
resultCode = (*OpenSLEngine::instance().getNativeEngine())->CreateOutputMix(
OpenSLEngine::instance().getNativeEngine(), &mMixer, 1, interfacesList,
interfaceRequirements);
CHECK_OPENSLES_ERROR;
// Bring mixer online
resultCode = (*mMixer)->Realize(mMixer, SL_BOOLEAN_FALSE);
CHECK_OPENSLES_ERROR;
// Speaker position mask must match the channel count
SLuint32 speakers =
channels > 1 ? (SL_SPEAKER_FRONT_LEFT | SL_SPEAKER_FRONT_RIGHT) : SL_SPEAKER_FRONT_CENTER;
// 16-bit little-endian PCM at the probed rate
SLDataFormat_PCM pcm_format = {SL_DATAFORMAT_PCM, channels, (SLuint32) rateId,
SL_PCMSAMPLEFORMAT_FIXED_16, SL_PCMSAMPLEFORMAT_FIXED_16,
speakers, SL_BYTEORDER_LITTLEENDIAN};
// Describe audio source - buffers + audio format
SLDataSource audio_source = { &queue_desc, &pcm_format };
// Describe audio sink
SLDataLocator_OutputMix mixer_desc = { SL_DATALOCATOR_OUTPUTMIX, mMixer };
SLDataSink audio_sink = { &mixer_desc, NULL };
// Create player instance
const SLInterfaceID playerInterfaces[] = { SL_IID_ANDROIDSIMPLEBUFFERQUEUE,
SL_IID_VOLUME,
SL_IID_ANDROIDCONFIGURATION };
const SLboolean playerInterfacesReqs[] = { SL_BOOLEAN_TRUE, SL_BOOLEAN_TRUE, SL_BOOLEAN_TRUE };
resultCode = (*OpenSLEngine::instance().getNativeEngine())->CreateAudioPlayer(
OpenSLEngine::instance().getNativeEngine(), &mPlayer,
&audio_source, &audio_sink, 3, playerInterfaces, playerInterfacesReqs);
CHECK_OPENSLES_ERROR;
// Stream type can only be set between creation and Realize; failure here is
// logged but deliberately not fatal
resultCode = (*mPlayer)->GetInterface(mPlayer, SL_IID_ANDROIDCONFIGURATION, &mAndroidConfig);
if (resultCode == SL_RESULT_SUCCESS)
{
SLint32 streamType = voice ? SL_ANDROID_STREAM_VOICE : SL_ANDROID_STREAM_MEDIA;
resultCode = (*mAndroidConfig)->SetConfiguration(mAndroidConfig, SL_ANDROID_KEY_STREAM_TYPE,
&streamType, sizeof(SLint32));
if (resultCode != SL_RESULT_SUCCESS)
ICELogCritical(<< "Failed to set audio destination with error " << (unsigned)resultCode);
}
else
ICELogCritical(<< "Failed to obtain android cfg audio interface with error " << (unsigned)resultCode);
// Bring player online
resultCode = (*mPlayer)->Realize(mPlayer, SL_BOOLEAN_FALSE);
CHECK_OPENSLES_ERROR;
// Obtain player control
resultCode = (*mPlayer)->GetInterface(mPlayer, SL_IID_PLAY, &mPlayerControl);
CHECK_OPENSLES_ERROR;
// Get the buffer queue interface
resultCode = (*mPlayer)->GetInterface(mPlayer, SL_IID_ANDROIDSIMPLEBUFFERQUEUE,
&mBufferQueue);
CHECK_OPENSLES_ERROR;
// Setup callback
resultCode = (*mBufferQueue)->RegisterCallback(mBufferQueue, DeviceCallback, this);
CHECK_OPENSLES_ERROR;
// Pre-queue every slot of the circular play buffer
mBufferSize = (int)Format(rate, channels).sizeFromTime(AUDIO_SPK_BUFFER_LENGTH);
mPlayBuffer.setCapacity(AUDIO_SPK_BUFFER_COUNT * mBufferSize);
mBufferIndex = 0;
for (int i = 0; i < AUDIO_SPK_BUFFER_COUNT; i++)
(*mBufferQueue)->Enqueue(mBufferQueue, mPlayBuffer.data() + i * mBufferSize,
(SLuint32)mBufferSize);
// Set the player's state to playing
resultCode = (*mPlayerControl)->SetPlayState(mPlayerControl, SL_PLAYSTATE_PLAYING);
CHECK_OPENSLES_ERROR;
}
// Tear down player and mixer. Must be safe on a partially-constructed chain:
// internalOpen() can throw after mPlayer was created but before mBufferQueue
// (or mPlayerControl) was obtained.
// Fix: (*mBufferQueue)->Clear() was called without a null check, so closing
// after a half-failed open dereferenced a null interface pointer.
void AndroidOutputDevice::internalClose()
{
  if (mPlayer)
  {
    if (*mPlayer)
    {
      // Flag the callback to output silence while we are stopping
      mInShutdown = true;
      ICELogInfo(<< "Stop player");
      if (mPlayerControl) {
        if (*mPlayerControl) {
          SLuint32 state = SL_PLAYSTATE_PLAYING;
          (*mPlayerControl)->SetPlayState(mPlayerControl, SL_PLAYSTATE_STOPPED);
          // Poll until OpenSL really leaves the playing state
          while (state != SL_PLAYSTATE_STOPPED) {
            (*mPlayerControl)->GetPlayState(mPlayerControl, &state);
            SyncHelper::delay(1);
          }
        }
      }
      // Clear buffer queue (may be null if open() failed before obtaining it)
      if (mBufferQueue)
      {
        ICELogInfo(<< "Clear player buffer queue");
        (*mBufferQueue)->Clear(mBufferQueue);
      }
      ICELogInfo(<< "Destroy player object");
      // Destroy player object
      (*mPlayer)->Destroy(mPlayer);
      mPlayer = nullptr;
      mPlayerControl = nullptr;
      mBufferQueue = nullptr;
      mEffect = nullptr;
      mAndroidConfig = nullptr;
    }
  }
  if (mMixer)
  {
    if (*mMixer)
      (*mMixer)->Destroy(mMixer);
    mMixer = nullptr;
  }
}
// Buffer-queue callback body, invoked on OpenSL's thread when a play slot
// has been consumed. Refills the current slot (from the connection, or with
// silence during shutdown) and re-queues it.
// Fix: the shutdown path enqueued a stack VLA that went out of scope on
// return while the queue could still read it asynchronously (use-after-scope,
// and VLAs are non-standard C++). Reuse the persistent play-buffer slot
// instead; its content is the same silence.
void AndroidOutputDevice::handleCallback(SLAndroidSimpleBufferQueueItf bq)
{
  char* buffer = mPlayBuffer.mutableData() + mBufferIndex * mBufferSize;
  if (mInShutdown)
  {
    // Feed silence from the persistent buffer; do not advance the ring index,
    // matching the previous behavior of the shutdown path.
    memset(buffer, 0, mBufferSize);
    (*mBufferQueue)->Enqueue(mBufferQueue, buffer, (SLuint32)mBufferSize);
    return;
  }
  // Ask producer about data
  if (mConnection)
  {
    Format f = getFormat();
    if (f.mRate != 0)
      mConnection->onSpkData(f, buffer, mBufferSize);
  }
  (*mBufferQueue)->Enqueue(mBufferQueue, buffer, (SLuint32)mBufferSize);
  mBufferIndex++;
  mBufferIndex %= AUDIO_SPK_BUFFER_COUNT;
}
// Static trampoline registered with the OpenSL buffer queue. Exceptions must
// never escape into OpenSL's thread, hence the catch-all.
void AndroidOutputDevice::DeviceCallback(SLAndroidSimpleBufferQueueItf bq, void* context)
{
  try
  {
    if (context != nullptr)
      static_cast<AndroidOutputDevice*>(context)->handleCallback(bq);
  }
  catch(...)
  {}
}
#endif // TARGET_ANDROID

View File

@@ -0,0 +1,147 @@
/* Copyright(C) 2007-2017 VoIP objects (voipobjects.com)
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef __AUDIO_ANDROID_H
#define __AUDIO_ANDROID_H
#ifdef TARGET_ANDROID
#include "Audio_Interface.h"
#include "Audio_Helper.h"
#include "Audio_Resampler.h"
#include "Audio_DataWindow.h"
#include "../Helper/HL_Pointer.h"
#include "../Helper/HL_ByteBuffer.h"
#include "../Helper/HL_Exception.h"
#include <memory>
#include <string>
#include <SLES/OpenSLES.h>
#include <SLES/OpenSLES_Android.h>
#include <SLES/OpenSLES_AndroidConfiguration.h>
namespace Audio
{
// Trivial enumerator: Android exposes exactly one logical audio device,
// so count() is 1 and open()/close() are no-ops.
class AndroidEnumerator: public Enumerator
{
public:
AndroidEnumerator();
~AndroidEnumerator();
void open(int direction);
void close();
int count();
std::string nameAt(int index);
int idAt(int index);
int indexOfDefaultDevice();
protected:
};
// OpenSL ES microphone capture device. open() probes the RateToProbe rates
// until the recorder can be created; captured slots are delivered on
// OpenSL's thread through DeviceCallback/handleCallback.
// Fix: mDeviceRate, mBufferSize and mRecorderBufferIndex were left
// uninitialized; getFormat() reads mDeviceRate even before open() succeeds,
// so give all three deterministic zero values like the other members.
class AndroidInputDevice: public InputDevice
{
public:
AndroidInputDevice(int devId);
~AndroidInputDevice();
bool open();
void close();
Format getFormat();
bool fakeMode();
void setFakeMode(bool fakemode);
int readBuffer(void* buffer);
bool active() const;
protected:
bool mActive = false;
SLObjectItf mRecorderObject = nullptr;
SLRecordItf mRecorderInterface = nullptr;
SLAndroidSimpleBufferQueueItf mRecorderBufferInterface = nullptr;
SLAndroidConfigurationItf mAndroidCfg = nullptr;
PResampler mResampler;
DataWindow mDeviceRateCache, mSdkRateCache;
int mDeviceRate = 0; // Actual rate of opened recorder; 0 until open() succeeds
int mBufferSize = 0; // Size of buffer used for recording (at native sample rate)
DataWindow mRecorderBuffer;
std::condition_variable mDataCondVar;
int mRecorderBufferIndex = 0; // Current slot in the circular recorder buffer
std::mutex mMutex;
void internalOpen(int rateCode, int rate);
void internalClose();
void handleCallback(SLAndroidSimpleBufferQueueItf bq);
static void DeviceCallback(SLAndroidSimpleBufferQueueItf bq, void* context);
};
// OpenSL ES playback device: output mix + audio player fed from a circular
// play buffer; slots are refilled on OpenSL's thread via DeviceCallback.
class AndroidOutputDevice: public OutputDevice
{
public:
AndroidOutputDevice(int devId);
~AndroidOutputDevice();
bool open();
void close();
Format getFormat();
bool fakeMode();
void setFakeMode(bool fakemode);
protected:
std::mutex mMutex;
int mDeviceRate = 0; // Actual rate of opened player; 0 until open() succeeds
SLObjectItf mMixer = nullptr;
SLObjectItf mPlayer = nullptr;
SLPlayItf mPlayerControl = nullptr;
SLAndroidSimpleBufferQueueItf mBufferQueue = nullptr;
SLAndroidConfigurationItf mAndroidConfig = nullptr;
SLEffectSendItf mEffect = nullptr;
DataWindow mPlayBuffer; // Circular buffer holding AUDIO_SPK_BUFFER_COUNT slots
int mBufferIndex = 0, mBufferSize = 0;
bool mInShutdown = false; // Makes the callback output silence while closing
void internalOpen(int rateId, int rate, bool voice);
void internalClose();
void handleCallback(SLAndroidSimpleBufferQueueItf bq);
static void DeviceCallback(SLAndroidSimpleBufferQueueItf bq, void* context);
};
// Process-wide, usage-counted wrapper around the native OpenSL ES engine
// object. Obtain it via instance().
class OpenSLEngine: public OsEngine
{
public:
OpenSLEngine();
~OpenSLEngine();
// open() / close() methods are based on usage counting.
// It means every close() call must be matched by corresponding open() call.
// True audio engine close will happen only on last close() call.
void open() override;
void close() override;
SLEngineItf getNativeEngine() const;
static OpenSLEngine& instance();
protected:
std::mutex mMutex;
int mUsageCounter = 0;
SLObjectItf mEngineObject = nullptr;
SLEngineItf mEngineInterface = nullptr;
void internalOpen();
void internalClose();
};
}
#endif // TARGET_ANDROID
#endif // __AUDIO_ANDROID_H

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,196 @@
/* Copyright(C) 2007-2014 VoIP objects (voipobjects.com)
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef __AUDIO_COREAUDIO_H
#define __AUDIO_COREAUDIO_H
#ifdef TARGET_OSX
#include "Audio_Interface.h"
#include "Audio_Helper.h"
#include "Audio_Resampler.h"
#include "Audio_DataWindow.h"
#include "../Helper/HL_Pointer.h"
#include "../Helper/HL_ByteBuffer.h"
#include "../Helper/HL_Exception.h"
#include <AudioToolbox/AudioQueue.h>
// Define CoreAudio buffer time length in milliseconds
#define COREAUDIO_BUFFER_TIME 20
namespace Audio
{
// Exception carrying a project error code plus the CoreAudio OSStatus
// that triggered it (stored as the Exception subcode).
class AudioException: public Exception
{
public:
AudioException(int code, OSStatus subcode)
:Exception(code, int(subcode))
{}
};
//#ifndef AudioDeviceID
//# define AudioDeviceID unsigned
//#endif
// CoreAudio device enumerator: snapshots the HAL device list together with
// per-device capabilities into mDeviceList.
// NOTE(review): nameAt() returns std::tstring here while the Android
// enumerator returns std::string - confirm tstring is typedef'ed somewhere,
// otherwise this does not compile.
class MacEnumerator: public Enumerator
{
public:
MacEnumerator();
~MacEnumerator();
void open(int direction);
void close();
int count();
std::tstring nameAt(int index);
int idAt(int index);
int indexOfDefaultDevice();
protected:
// Cached capabilities of one HAL device
struct DeviceInfo
{
AudioDeviceID mId;
std::string mName;
bool mCanChangeOutputVolume;
bool mCanChangeInputVolume;
int mInputCount, mOutputCount;
int mDefaultRate;
DeviceInfo(): mId(0), mCanChangeOutputVolume(false), mCanChangeInputVolume(false), mInputCount(0), mOutputCount(0), mDefaultRate(16000) {}
};
std::vector<DeviceInfo> mDeviceList;
unsigned mDefaultInput, mDefaultOutput;
int mDirection; // Direction passed to open()
void getInfo(DeviceInfo& di);
};
// Thin RAII-style wrapper over a CoreAudio AudioUnit: format/enable/callback
// configuration by (scope, bus) pair.
class CoreAudioUnit
{
public:
CoreAudioUnit();
~CoreAudioUnit();
void open(bool voice);
void close();
AudioStreamBasicDescription getFormat(int scope, int bus);
void setFormat(AudioStreamBasicDescription& format, int scope, int bus);
bool getEnabled(int scope, int bus);
void setEnabled(bool enabled, int scope, int bus);
void makeCurrent(AudioDeviceID deviceId, int scope, int bus);
void setCallback(AURenderCallbackStruct cb, int callbackType, int scope, int bus);
void setBufferFrameSizeInMilliseconds(int ms);
int getBufferFrameSize();
void initialize();
AudioUnit getHandle();
protected:
AudioUnit mUnit;
};
// Shared capture+render endpoint built on one CoreAudio unit. MacInputDevice
// and MacOutputDevice both hold a PMacDevice and toggle capture/render flags;
// usage is reference counted via mUsageCount.
class MacDevice
{
public:
MacDevice(int devId);
~MacDevice();
bool open();
void close();
void setRender(bool render);
void setCapture(bool capture);
int getId();
Format getFormat();
DataConnection* connection();
void setConnection(DataConnection* c);
void provideAudioToSpeaker(int channels, void* buffer, int length);
void obtainAudioFromMic(int channels, const void* buffer, int length);
protected:
AudioDeviceID mDeviceId;
bool mCapture, mRender; // Which directions are currently requested
bool mActive;
int mUsageCount; // Shared between the input and output wrappers
Mutex mGuard;
CoreAudioUnit mAudioUnit;
AudioComponent mComponent;
AudioStreamBasicDescription mCaptureInputFormat, mCaptureOutputFormat, mRenderInputFormat, mRenderOutputFormat, mStreamFormat;
AudioBufferList* mInputBufferList;
DataConnection* mConnection;
SpeexResampler mCaptureResampler, mRenderResampler;
ByteBuffer mTail;
DataWindow mInputBuffer, mOutputBuffer;
bool createUnit(bool voice);
void destroyUnit();
void startStream();
void stopStream();
void setupStreamFormat();
bool createResampleUnit(AudioStreamBasicDescription format);
// CoreAudio render/input callbacks (run on the HAL's realtime thread)
static OSStatus outputCallback( void *inRefCon,
AudioUnitRenderActionFlags *ioActionFlags,
const AudioTimeStamp *inTimeStamp,
UInt32 inBusNumber,
UInt32 inNumberFrames,
AudioBufferList *ioData );
static OSStatus inputCallback(void *inRefCon,
AudioUnitRenderActionFlags *ioActionFlags,
const AudioTimeStamp *inTimeStamp,
UInt32 inBusNumber,
UInt32 inNumberFrames,
AudioBufferList *ioData);
#ifdef TARGET_IOS
static void propListener(void *inClientData,
AudioSessionPropertyID inID,
UInt32 inDataSize,
const void * inData);
static void interruptionListener(void *inClientData, UInt32 inInterruption);
#endif
};
typedef SharedPtr<MacDevice> PMacDevice;
// Capture-side facade over a shared MacDevice.
class MacInputDevice: public InputDevice
{
public:
MacInputDevice(int devId);
~MacInputDevice();
bool open();
void close();
Format getFormat();
bool fakeMode();
void setFakeMode(bool fakemode);
int readBuffer(void* buffer);
protected:
PMacDevice mDevice;
};
// Render-side facade over a shared MacDevice.
class MacOutputDevice: public OutputDevice
{
public:
MacOutputDevice(int devId);
~MacOutputDevice();
bool open();
void close();
Format getFormat();
bool fakeMode();
void setFakeMode(bool fakemode);
protected:
PMacDevice mDevice;
};
}
#endif // TARGET_OSX
#endif // __AUDIO_COREAUDIO_H

View File

@@ -0,0 +1,171 @@
/* Copyright(C) 2007-2014 VoIP objects (voipobjects.com)
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "Audio_DataWindow.h"
using namespace Audio;
// Start with an empty, unallocated window; storage arrives via setCapacity().
DataWindow::DataWindow()
  : mData(NULL), mFilled(0), mCapacity(0)
{
}
// Release the backing store; free(NULL) is a defined no-op, so no guard needed.
DataWindow::~DataWindow()
{
  free(mData);
}
void DataWindow::setCapacity(int capacity)
{
Lock l(mMutex);
int tail = capacity - mCapacity;
mData = (char*)realloc(mData, capacity);
if (tail > 0)
memset(mData + mCapacity, 0, tail);
mCapacity = capacity;
}
// Append 'length' zero bytes. When the window is full the oldest bytes are
// shifted out so the newest data always fits (sliding-window semantics).
void DataWindow::addZero(int length)
{
Lock l(mMutex);
// Never append more than the whole window
if (length > mCapacity)
length = mCapacity;
int avail = mCapacity - mFilled;
if (avail < length)
{
// Drop the oldest (length - avail) bytes to make room
memmove(mData, mData + length - avail, mFilled - (length - avail));
mFilled -= length - avail;
}
memset(mData + mFilled, 0, length);
mFilled += length;
}
// Append raw bytes. If 'length' exceeds the capacity only the newest
// mCapacity bytes of the input are kept; when the window is full the oldest
// stored bytes are shifted out (sliding-window semantics).
void DataWindow::add(const void* data, int length)
{
Lock l(mMutex);
if (length > mCapacity)
{
// Keep only the tail of the input that fits
data = (char*)data + length - mCapacity;
length = mCapacity;
}
int avail = mCapacity - mFilled;
if (avail < length)
{
// Drop the oldest (length - avail) bytes to make room
memmove(mData, mData + length - avail, mFilled - (length - avail));
mFilled -= length - avail;
}
memcpy(mData + mFilled, data, length);
mFilled += length;
}
void DataWindow::add(short sample)
{
add(&sample, sizeof sample);
}
// Remove the oldest 'length' bytes, shifting the remainder to the front.
// The header declares a default of -1 meaning "erase everything".
// Fix: a negative length slipped past the old 'length > mFilled' clamp,
// producing a memmove that read from before the buffer and an *increase* of
// mFilled. Treat any negative value as "erase all".
void DataWindow::erase(int length)
{
  Lock l(mMutex);
  if (length < 0 || length > mFilled)
    length = mFilled;
  if (length != mFilled)
    memmove(mData, mData + length, mFilled - length);
  mFilled -= length;
}
// Read-only view of the buffered bytes (oldest byte first). Not locked:
// the pointer stays valid until the next setCapacity().
const char* DataWindow::data() const
{
return mData;
}
// Writable view of the buffered bytes; same lifetime caveat as data().
char* DataWindow::mutableData()
{
return mData;
}
// Discard all buffered bytes; capacity is retained.
void DataWindow::clear()
{
Lock l(mMutex);
mFilled = 0;
}
// Read the buffered data as an array of 16-bit samples.
short DataWindow::shortAt(int index) const
{
Lock l(mMutex);
assert(index < mFilled / 2);
return ((short*)mData)[index];
}
// Overwrite one 16-bit sample in place.
void DataWindow::setShortAt(short value, int index)
{
Lock l(mMutex);
assert(index < mFilled / 2);
((short*)mData)[index] = value;
}
// Consume up to 'length' of the oldest bytes. Copies into 'buffer' when it is
// non-NULL (NULL just discards), shifts the remainder to the front and
// returns the number of bytes actually consumed.
int DataWindow::read(void* buffer, int length)
{
Lock l(mMutex);
if (length > mFilled)
length = mFilled;
if (length)
{
if (buffer)
memcpy(buffer, mData, length);
if (length < mFilled)
memmove(mData, mData+length, mFilled - length);
mFilled -= length;
}
return length;
}
// Number of buffered bytes.
int DataWindow::filled() const
{
Lock l(mMutex);
return mFilled;
}
// Force the fill level after writing through mutableData().
// NOTE(review): no clamp against mCapacity - callers must not exceed it.
void DataWindow::setFilled(int filled)
{
Lock l(mMutex);
mFilled = filled;
}
// Allocated size of the window in bytes.
int DataWindow::capacity() const
{
Lock l(mMutex);
return mCapacity;
}
// Replace the whole content with 'length' zero bytes (must fit the capacity).
void DataWindow::zero(int length)
{
Lock l(mMutex);
assert(length <= mCapacity);
mFilled = length;
memset(mData, 0, mFilled);
}
// Duplicate each 16-bit mono sample of 'src' into both channels of 'dst'.
// NOTE(review): setCapacity()/filled() re-acquire the mutexes already held by
// lockDst/lockSrc - this only works if Mutex is recursive; confirm.
void DataWindow::makeStereoFromMono(DataWindow& dst, DataWindow& src)
{
Lock lockDst(dst.mMutex), lockSrc(src.mMutex);
// Stereo output needs twice the bytes of the mono input
dst.setCapacity(src.filled()*2);
short* input = (short*)src.mutableData();
short* output = (short*)dst.mutableData();
for (int i=0; i<src.filled()/2; i++)
output[i*2] = output[i*2+1] = input[i];
dst.mFilled = src.filled() * 2;
}

View File

@@ -0,0 +1,47 @@
/* Copyright(C) 2007-2017 VoIPobjects (voipobjects.com)
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef __AUDIO_BUFFER_H
#define __AUDIO_BUFFER_H
#include "../helper/HL_ByteBuffer.h"
#include "../helper/HL_Sync.h"
namespace Audio
{
// Thread-safe sliding byte window over a single realloc()'ed buffer.
// add() appends at the tail (dropping the oldest bytes when full),
// read()/erase() consume from the head. All public methods lock mMutex.
class DataWindow
{
public:
DataWindow();
~DataWindow();
void setCapacity(int capacity);
int capacity() const;
void addZero(int length);
void add(const void* data, int length);
void add(short sample);
int read(void* buffer, int length);
// length < 0 (the default) erases the whole content
void erase(int length = -1);
const char* data() const;
char* mutableData();
int filled() const;
void setFilled(int filled);
void clear();
short shortAt(int index) const;
void setShortAt(short value, int index);
void zero(int length);
static void makeStereoFromMono(DataWindow& dst, DataWindow& src);
protected:
mutable Mutex mMutex;
char* mData;     // realloc()'ed backing store, NULL until setCapacity()
int mFilled;     // Bytes currently buffered
int mCapacity;   // Allocated size of mData
};
}
#endif

View File

@@ -0,0 +1,239 @@
/* Copyright(C) 2007-2017 VoIPobjects (voipobjects.com)
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#define NOMINMAX
#include "Audio_DevicePair.h"
#include <algorithm>
#define LOG_SUBSYSTEM "Audio"
using namespace Audio;
// --- DevicePair ---
// Build a mic/speaker pair with optional acoustic echo cancellation (aec)
// and automatic gain control (agc). Pre-sizes every intermediate buffer so
// the realtime callbacks avoid reallocation.
DevicePair::DevicePair(bool aec, bool agc)
:mConfig(NULL), mDelegate(NULL), mAec(aec), mAgc(agc), mAecFilter(AUDIO_MIC_BUFFER_LENGTH*10, AUDIO_MIC_BUFFER_LENGTH, AUDIO_SAMPLERATE), mAgcFilter(AUDIO_CHANNELS)
{
mInputBuffer.setCapacity(AUDIO_MIC_BUFFER_SIZE * (AUDIO_MIC_BUFFER_COUNT + 1));
mOutputBuffer.setCapacity(AUDIO_SPK_BUFFER_SIZE * (AUDIO_SPK_BUFFER_COUNT + 1));
mInputResampingData.setCapacity(AUDIO_MIC_BUFFER_SIZE * (AUDIO_MIC_BUFFER_COUNT + 1));
mOutput10msBuffer.setCapacity((int)Format().sizeFromTime(AUDIO_SPK_BUFFER_LENGTH));
mOutputNativeData.setCapacity((int)Format().sizeFromTime(AUDIO_SPK_BUFFER_LENGTH * AUDIO_SPK_BUFFER_COUNT * 24));
}
// Detach both devices so they stop calling back into this (now dying) pair,
// then release them.
DevicePair::~DevicePair()
{
if (mInput)
{
if (mInput->connection() == this)
mInput->setConnection(NULL);
mInput.reset();
}
if (mOutput)
{
if (mOutput->connection() == this)
mOutput->setConnection(NULL);
mOutput.reset();
}
}
// Borrowed configuration map (may be NULL); ownership stays with the caller.
VariantMap* DevicePair::config()
{
return mConfig;
}
// Store a borrowed pointer to the configuration; no ownership is taken.
void DevicePair::setConfig(VariantMap* config)
{
mConfig = config;
}
// Currently attached capture device (may be empty).
PInputDevice DevicePair::input()
{
return mInput;
}
// Attach a new capture device and notify the delegate.
// Fixes: the previously attached device was left pointing at this pair (it
// kept feeding mic data after replacement; the destructor already detaches,
// so mirror that here), and a null 'input' crashed on setConnection().
void DevicePair::setInput(PInputDevice input)
{
  if (mInput == input)
    return;
  if (mInput && mInput->connection() == this)
    mInput->setConnection(NULL);
  mInput = input;
  if (mInput)
    mInput->setConnection(this);
  if (mDelegate)
    mDelegate->deviceChanged(this);
}
// Currently attached playback device (may be empty).
POutputDevice DevicePair::output()
{
return mOutput;
}
// Attach a new playback device and notify the delegate.
// Fixes: the previously attached device was left pointing at this pair (the
// destructor already detaches, so mirror that here), and a null 'output'
// crashed on setConnection().
void DevicePair::setOutput(POutputDevice output)
{
  if (output == mOutput)
    return;
  if (mOutput && mOutput->connection() == this)
    mOutput->setConnection(NULL);
  mOutput = output;
  if (mOutput)
    mOutput->setConnection(this);
  if (mDelegate)
    mDelegate->deviceChanged(this);
}
// Open input first, then output; the output is only opened when the input
// opened successfully. Returns true when every configured device opened.
// NOTE(review): with no input attached this always returns false and never
// opens the output - confirm a playback-only pair is not a supported case.
bool DevicePair::start()
{
bool result = false;
if (mInput)
result = mInput->open();
if (mOutput && result)
result &= mOutput->open();
return result;
}
// Close whichever devices are attached; safe when none are open.
void DevicePair::stop()
{
if (mInput)
mInput->close();
if (mOutput)
mOutput->close();
}
// Borrowed delegate receiving mic data / speaker requests / device changes.
void DevicePair::setDelegate(Delegate* dc)
{
mDelegate = dc;
}
// Currently installed delegate (may be NULL).
DevicePair::Delegate* DevicePair::delegate()
{
return mDelegate;
}
// Built-in file/tone player mixed into the speaker path (see onSpkData()).
Player& DevicePair::player()
{
return mPlayer;
}
// Capture-side entry point: called by the input device with native-rate audio.
// Accumulates the data, resamples whole AUDIO_MIC_BUFFER_LENGTH-ms blocks to
// the SDK rate and forwards each resampled block through processMicData().
void DevicePair::onMicData(const Format& f, const void* buffer, int length)
{
#ifdef DUMP_NATIVEINPUT
// Debug-only dump of the raw device signal
if (!mNativeInputDump)
{
mNativeInputDump = std::make_shared<WavFileWriter>();
mNativeInputDump->open("nativeinput.wav", f.mRate, f.mChannels);
}
if (mNativeInputDump)
mNativeInputDump->write(buffer, length);
#endif
// send the data to internal queue - it can hold data which were not processed by resampler in last call
mInputResampingData.add(buffer, length);
// split processing by blocks
int blocks = mInputResampingData.filled() / (int)f.sizeFromTime(AUDIO_MIC_BUFFER_LENGTH);
for (int blockIndex = 0; blockIndex < blocks; blockIndex++)
{
int wasProcessed = 0;
int wasProduced = mMicResampler.resample(f.mRate, // Source rate
mInputResampingData.data(), // Source data
(int)f.sizeFromTime(AUDIO_MIC_BUFFER_LENGTH), // Source size
wasProcessed,
AUDIO_SAMPLERATE, // Dest rate
mInputBuffer.mutableData() + mInputBuffer.filled(),
mInputBuffer.capacity() - mInputBuffer.filled());
mInputBuffer.setFilled(mInputBuffer.filled() + wasProduced);
mInputResampingData.erase((int)f.sizeFromTime(AUDIO_MIC_BUFFER_LENGTH));
// Forward one SDK-rate block (AGC/AEC happen inside processMicData)
processMicData(Format(), mInputBuffer.mutableData(), (int)Format().sizeFromTime(AUDIO_MIC_BUFFER_LENGTH));
mInputBuffer.erase((int)Format().sizeFromTime(AUDIO_MIC_BUFFER_LENGTH));
}
}
// Playback-side entry point: the output device asks for 'length' bytes in its
// native format 'f'. Pulls SDK-rate 10 ms blocks from the delegate (and the
// built-in player), resamples them to the native rate, and feeds the AEC's
// far-end signal with the pre-resampling SDK-rate copy.
void DevicePair::onSpkData(const Format& f, void* buffer, int length)
{
//ICELogMedia(<< "Audio::DevicePair::onSpkData() begin");
#ifdef DUMP_NATIVEOUTPUT
// Debug-only dump of what is actually sent to the device
if (!mNativeOutputDump)
{
mNativeOutputDump = std::make_shared<WavFileWriter>();
mNativeOutputDump->open("nativeoutput.wav", f.mRate, f.mChannels);
}
#endif
#ifdef CONSOLE_LOGGING
printf("Speaker requests %d\n", length);
#endif
Format nativeFormat = mOutput->getFormat();
// See how much bytes are needed yet - mOutputNativeData can contain some data already
int required = length - mOutputNativeData.filled();
if (required > 0)
{
// Find how much blocks must be received from RTP/decoder side
int nativeBufferSize = (int)nativeFormat.sizeFromTime(AUDIO_SPK_BUFFER_LENGTH);
int blocks = required / nativeBufferSize;
if (required % nativeBufferSize)
blocks++;
// Now request data from terminal or whatever delegate is
for (int blockIndex = 0; blockIndex < blocks; blockIndex++)
{
// Start from silence so a missing delegate yields zeros, not garbage
memset(mOutput10msBuffer.mutableData(), 0, (size_t)mOutput10msBuffer.capacity());
if (mDelegate)
mDelegate->onSpkData(Format(), mOutput10msBuffer.mutableData(), mOutput10msBuffer.capacity());
// Replace received data with custom file or data playing
mPlayer.onSpkData(Format(), mOutput10msBuffer.mutableData(), mOutput10msBuffer.capacity());
// Save it to process with AEC
if (mAec)
mAecSpkBuffer.add(mOutput10msBuffer.data(), mOutput10msBuffer.capacity());
// Resample these 10 milliseconds it to native format
int wasProcessed = 0;
int wasProduced = mSpkResampler.resample(AUDIO_SAMPLERATE, mOutput10msBuffer.data(), mOutput10msBuffer.capacity(), wasProcessed, f.mRate,
mOutputNativeData.mutableData() + mOutputNativeData.filled(), mOutputNativeData.capacity() - mOutputNativeData.filled());
mOutputNativeData.setFilled(mOutputNativeData.filled() + wasProduced);
#ifdef CONSOLE_LOGGING
printf("Resampled %d to %d\n", wasProcessed, wasProduced);
#endif
}
}
assert(mOutputNativeData.filled() >= length);
#ifdef DUMP_NATIVEOUTPUT
if (mNativeOutputDump)
mNativeOutputDump->write(mOutputNativeData.data(), length);
#endif
mOutputNativeData.read(buffer, length);
#define AEC_FRAME_SIZE (AUDIO_CHANNELS * (AUDIO_SAMPLERATE / 1000) * AEC_FRAME_TIME * sizeof(short))
// AEC filter wants frames.
if (mAec)
{
int nrOfFrames = mAecSpkBuffer.filled() / AEC_FRAME_SIZE;
for (int frameIndex=0; frameIndex < nrOfFrames; frameIndex++)
mAecFilter.toSpeaker(mAecSpkBuffer.mutableData() + AEC_FRAME_SIZE * frameIndex);
mAecSpkBuffer.erase(nrOfFrames * AEC_FRAME_SIZE);
}
//ICELogMedia(<< "Audio::DevicePair::onSpkData() end")
}
// Runs one captured block through the mic processing chain and delivers it.
// Statement order is significant: AGC runs first, then AEC sees the
// gain-adjusted signal (presumably intentional - confirm), then the block is
// forwarded to the delegate.
void DevicePair::processMicData(const Format& f, void* buffer, int length)
{
  if (mAgc)
    mAgcFilter.process(buffer, length);
  if (mAec)
    mAecFilter.fromMic(buffer);
  if (mDelegate)
    mDelegate->onMicData(f, buffer, length);
}

View File

@@ -0,0 +1,81 @@
/* Copyright(C) 2007-2017 VoIPobjects (voipobjects.com)
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef __AUDIO_DEVICEPAIR_H
#define __AUDIO_DEVICEPAIR_H
#include "Audio_Interface.h"
#include "Audio_Player.h"
#include "Audio_Resampler.h"
#include "Audio_DataWindow.h"
//#define DUMP_NATIVEOUTPUT
//#define DUMP_NATIVEINPUT
namespace Audio
{
// Couples a microphone (input) and speaker (output) device and runs the shared
// processing chain between them: resampling to/from the fixed internal format,
// echo cancellation (AEC), automatic gain control (AGC) and file playback
// mixed into the speaker path. Audio flows through the DataConnection
// callbacks (onMicData / onSpkData) driven by the native devices.
class DevicePair: protected DataConnection
{
public:
  // Consumer of processed mic data / producer of speaker data; also notified
  // when the underlying device changes.
  class Delegate: public DataConnection
  {
  public:
    virtual void deviceChanged(DevicePair* dpair) = 0;
  };
  DevicePair(bool aec = true, bool agc = true);
  virtual ~DevicePair();
  // Echo cancellation on/off
  void setAec(bool aec);
  bool aec();
  // Automatic gain control on/off
  void setAgc(bool agc);
  bool agc();
  VariantMap* config();
  void setConfig(VariantMap* config);
  PInputDevice input();
  void setInput(PInputDevice input);
  POutputDevice output();
  void setOutput(POutputDevice output);
  bool start();
  void stop();
  void setDelegate(Delegate* dc);
  Delegate* delegate();
  // Player whose output is mixed into the speaker path
  Player& player();
protected:
  VariantMap* mConfig;
  PInputDevice mInput;
  POutputDevice mOutput;
  Delegate* mDelegate;
  bool mAec;  // echo cancellation enabled
  bool mAgc;  // automatic gain control enabled
  AgcFilter mAgcFilter;
  AecFilter mAecFilter;
  Player mPlayer;
  UniversalResampler mMicResampler, mSpkResampler;
  // Intermediate buffers: internal-format mic/speaker data, AEC far-end
  // reference, and native-format staging areas
  DataWindow mInputBuffer, mOutputBuffer, mAecSpkBuffer, mInputResampingData, mOutputNativeData, mOutput10msBuffer;
#ifdef DUMP_NATIVEOUTPUT
  std::shared_ptr<WavFileWriter> mNativeOutputDump;
#endif
#ifdef DUMP_NATIVEINPUT
  std::shared_ptr<WavFileWriter> mNativeInputDump;
#endif
  // DataConnection callbacks invoked by the native devices
  void onMicData(const Format& f, const void* buffer, int length);
  void onSpkData(const Format& f, void* buffer, int length);
  // Applies AGC/AEC and forwards one block to the delegate
  void processMicData(const Format& f, void* buffer, int length);
};
typedef std::shared_ptr<DevicePair> PDevicePair;
}
#endif

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,187 @@
/* Copyright(C) 2007-2017 VoIP objects (voipobjects.com)
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef __AUDIO_DSOUND_H
#define __AUDIO_DSOUND_H
#include "../config.h"
#include <winsock2.h>
#include <windows.h>
#include <mmsystem.h>
#include "../Helper/HL_Sync.h"
#include "../Helper/HL_ByteBuffer.h"
#include "Audio_WavFile.h"
#include "Audio_Interface.h"
#include "Audio_Helper.h"
#include <deque>
#include <EndpointVolume.h>
#include <MMDeviceAPI.h>
#if defined(_MSC_VER)
# include <Functiondiscoverykeys_devpkey.h>
#endif
#include <vector>
#include <string>
#include <InitGuid.h>
#include <dsound.h>
namespace Audio
{
// Audio device enumerator for Windows Vista and newer, built on the
// MMDevice COM API.
class VistaEnumerator: public Enumerator
{
public:
  VistaEnumerator();
  ~VistaEnumerator();
  void open(int direction);
  void close();
  int count();
  std::tstring nameAt(int index);
  int idAt(int index);
  int indexOfDefaultDevice();
protected:
  IMMDeviceCollection* mCollection;     // devices for the requested data flow
  IMMDevice* mDefaultDevice;
  IMMDeviceEnumerator* mEnumerator;
  EDataFlow mDirection;                 // capture vs render
  std::vector<std::wstring> mNameList;  // cached friendly names
  void enumerate();
  IMMDevice* mapIndexToInterface(int index);
};
// Fallback enumerator for Windows XP, where the MMDevice API is unavailable.
class XpEnumerator: public Enumerator
{
public:
  XpEnumerator();
  ~XpEnumerator();
  void open(int direction);
  void close();
  int count();
  std::tstring nameAt(int index);
  int idAt(int index);
  int indexOfDefaultDevice();
protected:
  std::vector<std::wstring> mNameList;  // cached device names
  int mDirection;
};
// Small utilities shared by the DirectSound device classes.
class DSoundHelper
{
public:
  // Validates a COM HRESULT (presumably throws on failure - confirm in impl)
  static void checkComResult(HRESULT code);
  // Maps an enumerator device id to the DirectSound device GUID
  static GUID deviceId2Guid(int deviceId, bool captureDevice);
};
#if !defined(_MSC_VER)
typedef struct IDirectSoundNotify8 *LPDIRECTSOUNDNOTIFY8;
#endif
// Microphone capture device implemented on DirectSound capture buffers.
// A worker thread waits on buffer-position notifications and pulls data.
class DSoundInputDevice: public InputDevice
{
public:
  DSoundInputDevice(GUID deviceId);
  ~DSoundInputDevice();
  void enableDenoiser(bool enable);
  bool open();
  void close();
  bool isSimulate() const;
  void setSimulate(bool s);
  int readBuffer(void* buffer);
  Format getFormat();
protected:
  Mutex mGuard; /// Mutex to protect this instance.
  LPDIRECTSOUNDCAPTURE8 mDevice;
  LPDIRECTSOUNDCAPTUREBUFFER8 mBuffer;
  LPDIRECTSOUNDNOTIFY8 mNotifications;
  DSBPOSITIONNOTIFY mEventArray[AUDIO_MIC_BUFFER_COUNT];
  HANDLE mEventSignals[AUDIO_MIC_BUFFER_COUNT]; // Helper array to make WaitForMultipleObjects in loop
  int mBufferIndex;
  int mNextBuffer;
  GUID mGUID;                     // device GUID this instance was created for
  HANDLE mThreadHandle;           // capture worker thread
  HANDLE mShutdownSignal;         // signalled to stop the worker thread
  volatile bool mSimulate; /// Marks if simulate mode is active.
  int mRefCount;                  // open()/close() nesting counter
  ByteBuffer mQueue;
  unsigned mReadOffset;
  DenoiseFilter mDenoiser;
  volatile bool mEnableDenoiser;
  char mTempBuffer[AUDIO_MIC_BUFFER_SIZE];
  StubTimer mNullAudio;           // paces capture when simulating
#ifdef AUDIO_DUMPINPUT
  WavFileWriter mDump;
#endif
  bool tryReadBuffer(void* buffer);
  void openDevice();
  void closeDevice();
  static void threadProc(void* arg);
};
// Speaker playback device implemented on DirectSound secondary buffers.
// A worker thread refills the ring buffer as the play cursor advances.
class DSoundOutputDevice: public OutputDevice
{
public:
  DSoundOutputDevice(GUID deviceId);
  ~DSoundOutputDevice();
  bool open();
  void close();
  unsigned playedTime() const;
  bool isSimulate() const;
  void setSimulate(bool s);
  bool closing();
  Format getFormat();
protected:
  Mutex mGuard; /// Mutex to protect this instance
  int mDeviceID;
  LPDIRECTSOUND8 mDevice;
  LPDIRECTSOUNDBUFFER mPrimaryBuffer;
  LPDIRECTSOUNDBUFFER mBuffer;       // secondary (streaming) buffer
  GUID mGUID;
  unsigned mWriteOffset;
  unsigned mPlayedSamples;
  unsigned mSentBytes;
  DWORD mPlayCursor; // Measured in bytes
  unsigned mBufferSize;
  unsigned mTotalPlayed; // Measured in bytes
  unsigned mTail; // Measured in bytes
  HANDLE mShutdownSignal;            // signalled to stop the worker thread
  HANDLE mBufferSignal;
  HANDLE mThreadHandle;              // playback worker thread
  bool mSimulate;
  StubTimer mNullAudio;              // paces playback when simulating
  DWORD mWriteCursor;
  char mMediaFrame[AUDIO_SPK_BUFFER_SIZE];
  unsigned mRefCount;                // open()/close() nesting counter
  void openDevice();
  void closeDevice();
  void restoreBuffer();              // recovers a lost DirectSound buffer
  bool process();
  bool getMediaFrame();
  static void threadProc(void* arg);
};
}
#endif

View File

@@ -0,0 +1,148 @@
/* Copyright(C) 2007-2014 VoIP objects (voipobjects.com)
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifdef TARGET_WIN
# include <WinSock2.h>
#endif
#include "Audio_Helper.h"
#include "../helper/HL_Exception.h"
using namespace Audio;
// --- QPCSource
// --- QPCSource
// quantTime is the length of one audio buffer in milliseconds; nrOfQuants is
// how many buffers the source may report at once. time() returns elapsed time
// quantized to whole buffers and capped at quantTime * nrOfQuants.
TimeSource::TimeSource(int quantTime, int nrOfQuants)
{
#ifdef TARGET_WIN
  mCounter.QuadPart = 0;
#endif
#if defined(TARGET_OSX) || defined(TARGET_IOS)
  // Ratio converts mach_absolute_time() units into milliseconds
  mach_timebase_info(&mTimebase);
  mRatio = ((double)mTimebase.numer / (double)mTimebase.denom) / 1000000;
#endif
  mQuantTime = quantTime;
  mDepthTime = quantTime * nrOfQuants;
  mTailTime = 0;
}
TimeSource::~TimeSource()
{
}
// Captures the initial counter value; must be called before time().
void TimeSource::start()
{
#ifdef TARGET_WIN
  if (!QueryPerformanceFrequency(&mFreq))
    throw Exception(ERR_QPC, GetLastError());
  if (!QueryPerformanceCounter(&mCounter))
    throw Exception(ERR_QPC, GetLastError());
#endif
}
void TimeSource::stop()
{
}
// Returns the number of milliseconds elapsed since the previous call, rounded
// DOWN to a whole number of quants; the sub-quant remainder is carried over in
// mTailTime so no time is lost. The result is capped at mDepthTime (and the
// tail discarded) when the caller fell far behind.
unsigned TimeSource::time()
{
#ifdef TARGET_WIN
  LARGE_INTEGER c;
  if (!QueryPerformanceCounter(&c))
    throw Exception(ERR_QPC, GetLastError());
  //find the f
  double f = (double)mFreq.QuadPart / 1000.0;  // QPC ticks per millisecond
  //find the difference
  unsigned __int64 diff = c.QuadPart - mCounter.QuadPart;
  mCounter.QuadPart = c.QuadPart;
  diff = (unsigned __int64)((double)diff / f + 0.5); //get ms
  diff += mTailTime;  // account for the remainder of the previous call
  if (diff > mDepthTime)
  {
    mTailTime = 0;
    return mDepthTime;
  }
  else
  {
    mTailTime = (unsigned )(diff % (unsigned __int64)mQuantTime);
    unsigned int t = (unsigned )(diff / (unsigned __int64)mQuantTime);
    return t * mQuantTime;
  }
#endif
#if defined(TARGET_OSX) || defined(TARGET_IOS)
  uint64_t t = mach_absolute_time();
  uint64_t c = uint64_t((double)t * mRatio + 0.5);  // convert to milliseconds
  uint64_t diff = c - this->mTime + mTailTime;
  mTime = c;
  if (diff > mDepthTime)
  {
    mTailTime = 0;
    return mDepthTime;
  }
  else
  {
    mTailTime = diff % mQuantTime;
    uint64_t t = diff / mQuantTime;
    return t * mQuantTime;
  }
#endif
#if defined(TARGET_LINUX)
  // NOTE(review): not implemented on Linux - falls off the end of a non-void
  // function after the assert (undefined behavior if ever reached in release).
  assert(0);
#endif
}
// --- StubTimer ---
StubTimer::StubTimer(int bufferTime, int bufferCount)
:mBufferTime(bufferTime), mBufferCount(bufferCount), mTimeSource(bufferTime, bufferCount), mActive(false)
{
#ifdef TARGET_WIN
mStubSignal = ::CreateEvent(NULL, FALSE, FALSE, NULL);
#endif
}
StubTimer::~StubTimer()
{
#ifdef TARGET_WIN
::CloseHandle(mStubSignal);
#endif
}
void StubTimer::start()
{
mTimeSource.start();
mCurrentTime = mTimeSource.time();
mActive = true;
}
void StubTimer::stop()
{
mTimeSource.stop();
mActive = false;
}
void StubTimer::waitForBuffer()
{
if (!mActive)
start();
unsigned t = mTimeSource.time();
while (!t)
{
#ifdef TARGET_WIN
::WaitForSingleObject(mStubSignal, mBufferTime);
#endif
#if defined(TARGET_OSX) || defined(TARGET_IOS)
usleep(100);
#endif
t = mTimeSource.time();
}
}

View File

@@ -0,0 +1,78 @@
/* Copyright(C) 2007-2014 VoIP objects (voipobjects.com)
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef __AUDIO_HELPER_H
#define __AUDIO_HELPER_H
#ifdef TARGET_WIN
#include <EndpointVolume.h>
#include <MMDeviceAPI.h>
#if defined(_MSC_VER)
# include <Functiondiscoverykeys_devpkey.h>
#endif
#endif
#if defined(TARGET_OSX) || defined(TARGET_IOS)
# include <AudioUnit/AudioUnit.h>
# include <AudioToolbox/AudioConverter.h>
# include <AudioToolbox/AudioServices.h>
# include <mach/mach_time.h>
#endif
#include <vector>
#include "Audio_Interface.h"
namespace Audio
{
// Monotonic time source that reports elapsed time quantized to audio-buffer
// lengths (see Audio_Helper.cpp). Platform backends: QPC on Windows,
// mach_absolute_time on OSX/iOS; not implemented on Linux.
class TimeSource
{
protected:
#ifdef TARGET_WIN
  LARGE_INTEGER mCounter; /// Current value from QPC.
  LARGE_INTEGER mFreq; /// Current frequency from QPC.
#endif
#if defined(TARGET_OSX) || defined(TARGET_IOS)
  uint64_t mTime;
  struct mach_timebase_info mTimebase;
  double mRatio;  // converts mach time units to milliseconds
#endif
  unsigned mQuantTime; /// Used time quants length in milliseconds.
  unsigned mDepthTime; /// Number of available time quants.
  unsigned mTailTime; /// Not-accounted milliseconds.
public:
  TimeSource(int quantTime, int nrOfQuants);
  ~TimeSource();
  void start();
  void stop();
  unsigned time();
};
// Pacing helper for simulated audio devices: waitForBuffer() sleeps until one
// buffer interval of real time has elapsed.
class StubTimer
{
public:
  StubTimer(int bufferTime, int bufferCount);
  ~StubTimer();
  void start();
  void stop();
  void waitForBuffer();
protected:
  unsigned mBufferTime;    // single buffer length, milliseconds
  unsigned mBufferCount;
  unsigned mCurrentTime;
  TimeSource mTimeSource;
#ifdef TARGET_WIN
  HANDLE mStubSignal;      // waitable handle used purely for timed waits
#endif
  bool mActive;            // lazily set by first waitForBuffer()
};
}
#endif

View File

@@ -0,0 +1,153 @@
/* Copyright(C) 2007-2017 VoIP objects (voipobjects.com)
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "Audio_Interface.h"
#include "../helper/HL_OsVersion.h"
#if !defined(USE_NULL_AUDIO)
# ifdef TARGET_WIN
# include "Audio_Wmme.h"
# include "Audio_DirectSound.h"
# endif
# ifdef TARGET_OSX
# include "Audio_CoreAudio.h"
# endif
# ifdef TARGET_ANDROID
# include "Audio_Android.h"
# endif
#endif
#include "Audio_Helper.h"
#include "Audio_Null.h"
using namespace Audio;
// Base device: holds the data connection that receives/supplies audio.
Device::Device()
  :mConnection(nullptr)
{
}
Device::~Device()
{
}
void Device::setConnection(DataConnection* connection)
{
  mConnection = connection;
}
DataConnection* Device::connection()
{
  return mConnection;
}
InputDevice::InputDevice()
{
}
InputDevice::~InputDevice()
{
}
// Factory: creates the platform capture device for the given enumerator
// device id. Returns a raw owning pointer (caller wraps it, see PInputDevice),
// or nullptr when no backend is compiled in.
InputDevice* InputDevice::make(int devId)
{
#if defined(USE_NULL_AUDIO)
  return new NullInputDevice();
#else
#if defined(TARGET_WIN) && defined(_MSC_VER)
  // return new WmmeInputDevice(index);
  return new DSoundInputDevice(DSoundHelper::deviceId2Guid(devId, true));
#endif
#ifdef TARGET_OSX
  return new MacInputDevice(devId);
#endif
#ifdef TARGET_ANDROID
  return new AndroidInputDevice(devId);
#endif
#endif
  return nullptr;
}
OutputDevice::OutputDevice()
{
}
OutputDevice::~OutputDevice()
{
}
// Factory: creates the platform playback device for the given enumerator
// device id; nullptr when no backend is compiled in.
OutputDevice* OutputDevice::make(int devId)
{
#if defined(USE_NULL_AUDIO)
  return new NullOutputDevice();
#else
#if defined(TARGET_WIN)
  //return new WmmeOutputDevice(index);
  return new DSoundOutputDevice(DSoundHelper::deviceId2Guid(devId, false));
#endif
#ifdef TARGET_OSX
  return new MacOutputDevice(devId);
#endif
#ifdef TARGET_ANDROID
  return new AndroidOutputDevice(devId);
#endif
#endif
  return nullptr;
}
// --- Enumerator ---
// --- Enumerator ---
Enumerator::Enumerator()
{
}
Enumerator::~Enumerator()
{
}
// Finds the index of the device with the given human-readable name.
// Returns -1 when no device matches.
int Enumerator::nameToIndex(const std::tstring& name)
{
  int index = 0;
  while (index < count())
  {
    if (nameAt(index) == name)
      return index;
    ++index;
  }
  return -1;
}
// Factory: creates the platform device enumerator. Pass useNull = true (or
// build with USE_NULL_AUDIO) to force the stub enumerator. Falls back to the
// null enumerator when no platform backend applies.
Enumerator* Enumerator::make(bool useNull)
{
  if (useNull)
    return new NullEnumerator();
#ifndef USE_NULL_AUDIO
#ifdef TARGET_WIN
  // MMDevice API exists only on Vista and newer
  if (winVersion() > Win_Xp)
    return new VistaEnumerator();
  else
    return new XpEnumerator();
#endif
#ifdef TARGET_OSX
  return new MacEnumerator();
#endif
#endif
  return new NullEnumerator();
}
// ----- OsEngine ------------
OsEngine* OsEngine::instance()
{
#ifdef USE_NULL_AUDIO
return nullptr;
#endif
#ifdef TARGET_ANDROID
return &OpenSLEngine::instance();
#endif
return nullptr;
}

View File

@@ -0,0 +1,135 @@
/* Copyright(C) 2007-2016 VoIP objects (voipobjects.com)
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef __AUDIO_INTERFACE_H
#define __AUDIO_INTERFACE_H
#include <string>
#include "../config.h"
#include "../helper/HL_Types.h"
#include "../helper/HL_VariantMap.h"
#include "../helper/HL_Pointer.h"
#include "Audio_WavFile.h"
#include "Audio_Quality.h"
namespace Audio
{
// Device direction selectors (presumably passed to Enumerator::open() -
// confirm against the platform enumerators).
enum
{
  myMicrophone = 1,
  mySpeaker = 2
};
// Describes a PCM stream: sample rate and channel count. Samples are assumed
// to be 16-bit (2 bytes) throughout the size/time conversions below.
struct Format
{
  int mRate;      // Sample rate, Hz
  int mChannels;  // Interleaved channel count
  Format()
    :mRate(AUDIO_SAMPLERATE), mChannels(AUDIO_CHANNELS)
  {}
  Format(int rate, int channels)
    :mRate(rate), mChannels(channels)
  {}
  // Number of per-channel samples held in `length` bytes of 16-bit PCM
  int samplesFromSize(int length) const
  {
    return length / 2 / mChannels;
  }
  // Returns milliseconds
  float timeFromSize(int length) const
  {
    return float(samplesFromSize(length) / (mRate / 1000.0));
  }
  // Bytes for the given duration: ms * (rate/1000) * 2 bytes * channels
  float sizeFromTime(int milliseconds) const
  {
    return float((milliseconds * mRate) / 500.0 * mChannels);
  }
  // Human-readable description, e.g. "8000Hz 1ch"
  std::string toString()
  {
    char buffer[64];
    // snprintf instead of sprintf: bounded even if field widths ever grow
    snprintf(buffer, sizeof(buffer), "%dHz %dch", mRate, mChannels);
    return std::string(buffer);
  }
};
// Bidirectional audio data sink/source: devices push captured mic data into
// onMicData() and pull playback data via onSpkData().
class DataConnection
{
public:
  virtual void onMicData(const Format& format, const void* buffer, int length) = 0;
  virtual void onSpkData(const Format& format, void* buffer, int length) = 0;
};
// Base class of all audio devices; owns a non-owning pointer to the connection
// that exchanges data with the device.
class Device
{
public:
  Device();
  virtual ~Device();
  void setConnection(DataConnection* connection);
  DataConnection* connection();
  virtual bool open() = 0;
  virtual void close() = 0;
  virtual Format getFormat() = 0;
protected:
  DataConnection* mConnection;
};
// Capture (microphone) device; make() builds the platform implementation.
class InputDevice: public Device
{
public:
  InputDevice();
  virtual ~InputDevice();
  static InputDevice* make(int devId);
};
typedef std::shared_ptr<InputDevice> PInputDevice;
// Playback (speaker) device; make() builds the platform implementation.
class OutputDevice: public Device
{
public:
  OutputDevice();
  virtual ~OutputDevice();
  static OutputDevice* make(int devId);
};
typedef std::shared_ptr<OutputDevice> POutputDevice;
// Lists the audio devices of one direction (microphone or speaker).
// Call open(direction) before querying, close() when done.
class Enumerator
{
public:
  Enumerator();
  virtual ~Enumerator();
  // Index of the device with the given name, or -1 when absent
  int nameToIndex(const std::tstring& name);
  virtual void open(int direction) = 0;
  virtual void close() = 0;
  virtual int count() = 0;
  virtual std::tstring nameAt(int index) = 0;
  virtual int idAt(int index) = 0;
  virtual int indexOfDefaultDevice() = 0;
  static Enumerator* make(bool useNull = false);
};
// Process-wide OS audio engine hook (used on Android for OpenSL ES);
// instance() returns nullptr on platforms that need none.
class OsEngine
{
public:
  virtual void open() = 0;
  virtual void close() = 0;
  static OsEngine* instance();
};
};
#endif

View File

@@ -0,0 +1,330 @@
/* Copyright(C) 2007-2014 VoIP objects (voipobjects.com)
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "../config.h"
#include "../helper/HL_Exception.h"
#include "Audio_Mixer.h"
#include <algorithm>
#include "../helper/HL_Log.h"
#define LOG_SUBSYSTEM "Mixer"
using namespace Audio;
// One mixer input channel. Pre-starts resamplers for every supported source
// rate so addPcm() can convert to the internal AUDIO_SAMPLERATE on the fly.
Mixer::Stream::Stream()
{
  mResampler8.start(AUDIO_CHANNELS, 8000, AUDIO_SAMPLERATE);
  mResampler16.start(AUDIO_CHANNELS, 16000, AUDIO_SAMPLERATE);
  mResampler32.start(AUDIO_CHANNELS, 32000, AUDIO_SAMPLERATE);
  mResampler48.start(AUDIO_CHANNELS, 48000, AUDIO_SAMPLERATE);
  mActive = false;
  mContext = NULL;
  mSSRC = 0;
  mFadeOutCounter = 0;
  // Room for several playback buffers of queued audio
  mData.setCapacity(AUDIO_SPK_BUFFER_SIZE * AUDIO_SPK_BUFFER_COUNT);
}
Mixer::Stream::~Stream()
{
}
// A stream is identified by the pair (context, ssrc) - see Mixer::addPcm().
void Mixer::Stream::setSsrc(unsigned ssrc)
{
  mSSRC = ssrc;
}
unsigned Mixer::Stream::ssrc()
{
  return mSSRC;
}
void Mixer::Stream::setContext(void* context)
{
  mContext = context;
}
void* Mixer::Stream::context()
{
  return mContext;
}
// Queued PCM awaiting mixing (internal sample rate)
DataWindow& Mixer::Stream::data()
{
  return mData;
}
bool Mixer::Stream::active()
{
  return mActive;
}
void Mixer::Stream::setActive(bool active)
{
  mActive = active;
}
// Resamples `length` bytes of PCM at `rate` (must be 8/16/32/48 kHz) to the
// internal AUDIO_SAMPLERATE and appends the result to the stream's queue.
void Mixer::Stream::addPcm(int rate, const void* input, int length)
{
  // Resample to internal sample rate
  unsigned outputSize = unsigned(0.5 + length * ((float)AUDIO_SAMPLERATE / rate));
  if (mTempBuffer.size() < outputSize)
    mTempBuffer.resize(outputSize);
  Resampler* resampler = (rate == 8000) ? &mResampler8 : ((rate == 16000) ? &mResampler16 : ((rate == 32000) ? &mResampler32 : &mResampler48));
  int inputProcessed = 0;
  resampler->processBuffer(input, length, inputProcessed, mTempBuffer.mutableData(), outputSize);
  // inputProcessed result value is ignored here - rate will be 8/16/32/48k, inputProcessed is equal to length
  // Queue data
  mData.add(mTempBuffer.data(), outputSize);
}
// N-way audio mixer; mixed output is accumulated in mOutput.
Mixer::Mixer()
{
  mActiveCounter = 0;
  mOutput.setCapacity(32768);
}
Mixer::~Mixer()
{
}
void Mixer::unregisterChannel(void* channel)
{
for (int i=0; i<AUDIO_MIX_CHANNEL_COUNT; i++)
{
Stream& c = mChannelList[i];
if (c.active() && c.context() == channel)
{
c.setActive(false); // stream is not active anymore
c.data().clear(); // clear data
mActiveCounter--;
}
}
}
void Mixer::clear(void* context, unsigned ssrc)
{
for (int i=0; i<AUDIO_MIX_CHANNEL_COUNT; i++)
{
Stream& c = mChannelList[i];
if (c.active() && c.context() == context && c.ssrc() == ssrc)
{
c.setActive(false);
c.data().clear();
mActiveCounter--;
}
}
}
// Claims the first inactive stream slot for (context, ssrc).
// Returns NULL when all AUDIO_MIX_CHANNEL_COUNT slots are busy.
Mixer::Stream* Mixer::allocateChannel(void* context, unsigned ssrc)
{
  // Allocate new channel
  Lock l(mMutex);
  Stream* channel;
  for (int i=0; i<AUDIO_MIX_CHANNEL_COUNT;i++)
  {
    channel = &mChannelList[i];
    if (!channel->active())
    {
      channel->setSsrc(ssrc);
      channel->setContext(context);
      channel->data().clear();
      mActiveCounter++;
      // Activate last so a concurrent reader never sees a half-initialized slot
      channel->setActive(true);
      return channel;
    }
  }
  return NULL;
}
// Queues PCM for mixing into the stream identified by (context, ssrc),
// allocating a stream slot on first use. inputRate must be one of the rates
// the per-stream resamplers support. Throws ERR_MIXER_OVERFLOW when all
// channel slots are busy. fadeOut is currently unused here.
void Mixer::addPcm(void* context, unsigned ssrc,
                   const void* inputData, int inputLength,
                   int inputRate, bool fadeOut)
{
  // 48000 included to match the DataWindow overload and Stream's mResampler48
  assert(inputRate == 8000 || inputRate == 16000 || inputRate == 32000 || inputRate == 48000);
  int i;
  // Locate a channel
  Stream* channel = NULL;
  for (i=0; i<AUDIO_MIX_CHANNEL_COUNT && !channel; i++)
  {
    Stream& c = mChannelList[i];
    if (c.active() && c.context() == context && c.ssrc() == ssrc)
      channel = &c;
  }
  if (!channel)
  {
    channel = allocateChannel(context, ssrc);
    if (!channel)
      throw Exception(ERR_MIXER_OVERFLOW);
  }
  channel->addPcm(inputRate, inputData, inputLength);
}
// DataWindow overload: queues w.filled() bytes at `rate` for mixing into the
// stream identified by (context, ssrc). Throws ERR_MIXER_OVERFLOW when all
// channel slots are busy. fadeOut is currently unused here.
void Mixer::addPcm(void* context, unsigned ssrc, Audio::DataWindow& w, int rate, bool fadeOut)
{
  assert(rate == 8000 || rate == 16000 || rate == 32000 || rate == 48000);
  int i;
  // Locate a channel
  Stream* channel = NULL;
  for (i=0; i<AUDIO_MIX_CHANNEL_COUNT && !channel; i++)
  {
    Stream& c = mChannelList[i];
    if (c.active() && c.context() == context && c.ssrc() == ssrc)
      channel = &c;
  }
  if (!channel)
  {
    channel = allocateChannel(context, ssrc);
    if (!channel)
      throw Exception(ERR_MIXER_OVERFLOW);
  }
  channel->addPcm(rate, w.data(), w.filled());
  //ICELogCritical(<<"Mixer stream " << int(this) << " has " << w.filled() << " bytes");
}
// Mixes all active streams into mOutput using a max-magnitude policy: for each
// sample position the contribution with the greatest absolute value wins.
// Has fast paths for one stream (plain copy) and two streams; the generic path
// walks sample by sample until every stream is drained. Consumed bytes are
// erased from each stream's queue.
void Mixer::mix()
{
  // Current sample
  int sample = 0;
  // Counter of processed active channels
  int processed = 0;
  // Samples & sources counters
  unsigned sampleCounter = 0, sourceCounter;
  short outputBuffer[512];
  unsigned outputCounter = 0;
  // Build active channel map
  Stream* channelList[AUDIO_MIX_CHANNEL_COUNT];
  int activeCounter = 0;
  for (int i=0; i<AUDIO_MIX_CHANNEL_COUNT; i++)
    if (mChannelList[i].active())
      channelList[activeCounter++] = &mChannelList[i];
  // No active channels - nothing to mix - exit
  if (!activeCounter)
  {
    //ICELogCritical(<< "No active channel");
    return;
  }
  // Optimized versions for 1& 2 active channels
  if (activeCounter == 1)
  {
    // Copy much samples as we have
    Stream& audio = *channelList[0];
    mOutput.add(audio.data().data(), audio.data().filled());
    audio.data().erase(audio.data().filled());
    //ICELogCritical(<<"Length of mixer stream " << audio.data().filled());
  }
  else
  if (activeCounter == 2)
  {
    Stream& audio1 = *channelList[0];
    Stream& audio2 = *channelList[1];
    // Sample (not byte) counts; the shorter stream is padded with silence
    int filled1 = audio1.data().filled() / 2, filled2 = audio2.data().filled() / 2;
    int available = filled1 > filled2 ? filled1 : filled2;
    // Find how much samples can be mixed
    int filled = mOutput.filled() / 2;
    int maxsize = mOutput.capacity() / 2;
    if (maxsize - filled < available)
      available = maxsize - filled;  // clamp to output free space
    // NOTE(review): this `short sample` shadows the `int sample` above
    short sample = 0;
    for (int i=0; i<available; i++)
    {
      short sample1 = filled1 > i ? audio1.data().shortAt(i) : 0;
      short sample2 = filled2 > i ? audio2.data().shortAt(i) : 0;
      // Max-magnitude mixing
      sample = (abs(sample1) > abs(sample2)) ? sample1 : sample2;
      mOutput.add(sample);
    }
    audio1.data().erase(available*2);
    audio2.data().erase(available*2);
  }
  else
  {
    // Generic path: advance one sample position at a time until no stream has
    // data left at the current position; flush in 512-sample batches.
    do
    {
      sample = 0;
      sourceCounter = 0;
      processed = 0;
      for (int i=0; i<activeCounter; i++)
      {
        Stream& audio = *channelList[i];
        processed++;
        if (audio.data().filled() > (int)sampleCounter * 2)
        {
          short currentSample = audio.data().shortAt(sampleCounter);
          if (abs(currentSample) > abs(sample))
            sample = currentSample;
          sourceCounter++;
        }
      }
      if (sourceCounter)
      {
        outputBuffer[outputCounter++] = (short)sample;
        sampleCounter++;
      }
      // Check if time to flash output buffer
      if ((!sourceCounter || outputCounter == 512) && outputCounter)
      {
        mOutput.add(outputBuffer, outputCounter * 2);
        outputCounter = 0;
      }
    }
    while (sourceCounter);
    processed = 0;
    // Drop everything that was consumed (shorter streams are drained fully)
    for (int i=0; i<activeCounter; i++)
    {
      Stream& audio = *channelList[i];
      audio.data().erase(sampleCounter*2);
    }
  }
}
// Reads up to outputLength mixed bytes into outputData, mixing on demand when
// the accumulator runs low. The buffer is zero-filled first, so a short read
// still leaves valid (silent) audio. Returns the number of bytes actually read.
int Mixer::getPcm(void* outputData, int outputLength)
{
  if (mOutput.filled() < outputLength)
    mix();
  //ICELogCritical(<<"Mixer has " << mOutput.filled() << " available bytes");
  memset(outputData, 0, outputLength);
  return mOutput.read(outputData, outputLength);
}
// Mixes whatever is queued and moves ALL available mixed bytes into `output`
// (its capacity is resized to fit). Returns the number of bytes transferred.
int Mixer::mixAndGetPcm(Audio::DataWindow& output)
{
  // Mix
  mix();
  // Set output space
  output.setCapacity(mOutput.filled());
  // Read mixed data to output
  return mOutput.read(output.mutableData(), output.capacity());
}
// Number of already-mixed bytes waiting to be read
int Mixer::available()
{
  return mOutput.filled();
}

View File

@@ -0,0 +1,72 @@
/* Copyright(C) 2007-2017 VoIPobjects (voipobjects.com)
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef _RX_MIXER_H
#define _RX_MIXER_H
#include "../config.h"
#include "../helper/HL_ByteBuffer.h"
#include "../helper/HL_Sync.h"
#include "Audio_Resampler.h"
#include "Audio_DataWindow.h"
#include <map>
#include <atomic>
namespace Audio
{
// Mixes PCM from up to AUDIO_MIX_CHANNEL_COUNT concurrent sources into one
// stream at the internal sample rate. A source is identified by the pair
// (context, ssrc); each gets a Stream slot with its own resamplers and queue.
class Mixer
{
protected:
  // One mixer input: per-rate resamplers plus a queue of internal-rate PCM.
  class Stream
  {
  protected:
    DataWindow mData;               // queued PCM awaiting mixing
    Resampler mResampler8,
              mResampler16,
              mResampler32,
              mResampler48;         // one per supported source rate
    bool mActive;                   // slot in use
    void* mContext;                 // owner identity (half of the stream key)
    unsigned mSSRC;                 // RTP source id (other half of the key)
    unsigned mFadeOutCounter;
    ByteBuffer mTempBuffer;         // scratch space for resampling
  public:
    Stream();
    ~Stream();
    void setSsrc(unsigned ssrc);
    unsigned ssrc();
    void setContext(void* context);
    void* context();
    DataWindow& data();
    bool active();
    void setActive(bool active);
    void addPcm(int rate, const void* input, int length);
  };
  Stream mChannelList[AUDIO_MIX_CHANNEL_COUNT];
  Mutex mMutex;                     // guards channel slot allocation
  DataWindow mOutput;               // accumulated mixed audio
  std::atomic_int mActiveCounter;   // number of active slots
  void mix();
  Stream* allocateChannel(void* context, unsigned ssrc);
public:
  Mixer();
  ~Mixer();
  void unregisterChannel(void* context);
  void clear(void* context, unsigned ssrc);
  void addPcm(void* context, unsigned ssrc, const void* inputData, int inputLength, int inputRate, bool fadeOut);
  void addPcm(void* context, unsigned ssrc, Audio::DataWindow& w, int rate, bool fadeOut);
  int getPcm(void* outputData, int outputLength);
  int mixAndGetPcm(Audio::DataWindow& output);
  int available();
};
} //end of namespace
#endif

View File

@@ -0,0 +1,177 @@
#include "Audio_Null.h"
#include "helper/HL_Log.h"
#define LOG_SUBSYSTEM "NULL audio"
using namespace Audio;
// Worker-thread timer that fires Delegate::onTimerSignal() every `interval`
// milliseconds. The constructor starts the thread immediately, so the first
// callback may arrive before the constructor's caller regains control.
NullTimer::NullTimer(int interval, Delegate *delegate, const char* name)
  :mShutdown(false), mDelegate(delegate), mInterval(interval), mThreadName(name)
{
  start();
}
NullTimer::~NullTimer()
{
  stop();
}
void NullTimer::start()
{
  mShutdown = false;
  mWorkerThread = std::thread(&NullTimer::run, this);
}
// Signals shutdown and joins the worker thread.
void NullTimer::stop()
{
  mShutdown = true;
  if (mWorkerThread.joinable())
    mWorkerThread.join();
}
// Thread body. mTail accumulates real elapsed microseconds; one callback is
// fired per full interval contained in it, so timing drift is not lost even
// when sleep overshoots.
void NullTimer::run()
{
  mTail = 0;
  while (!mShutdown)
  {
    // Get current timestamp
    std::chrono::system_clock::time_point timestamp = std::chrono::system_clock::now();
    while (mTail >= mInterval * 1000)
    {
      if (mDelegate)
        mDelegate->onTimerSignal(*this);
      mTail -= mInterval * 1000;
    }
    // Sleep for mInterval - mTail milliseconds
    std::this_thread::sleep_for(std::chrono::microseconds(mInterval * 1000 - mTail));
    mTail += std::chrono::duration_cast<std::chrono::microseconds>(std::chrono::system_clock::now() - timestamp).count();
  }
}
// --------------------- NullInputDevice -------------------------
// --------------------- NullInputDevice -------------------------
// Simulated microphone: delivers zero-filled buffers to the connection at the
// real capture cadence, driven by a NullTimer thread.
NullInputDevice::NullInputDevice()
  :mBuffer(nullptr)
{
}
NullInputDevice::~NullInputDevice()
{
  close();
}
bool NullInputDevice::open()
{
  mBuffer = malloc(AUDIO_MIC_BUFFER_SIZE);
  memset(mBuffer, 0, AUDIO_MIC_BUFFER_SIZE);
  mTimeCounter = 0; mDataCounter = 0;
  // Creation of timer starts it also. So first onTimerSignal can come even before open() returns.
  mTimer = std::make_shared<NullTimer>(AUDIO_MIC_BUFFER_LENGTH, this, "NullMicrophoneThread");
  return true;
}
// Safe to call repeatedly (also runs from the destructor); resetting the timer
// joins its thread before the buffer is freed.
void NullInputDevice::close()
{
  mTimer.reset();
  if (mBuffer)
  {
    free(mBuffer);
    mBuffer = nullptr;
  }
  ICELogInfo(<<"Pseudocaptured " << mTimeCounter << " milliseconds , " << mDataCounter << " bytes.");
}
Format NullInputDevice::getFormat()
{
  assert (Format().sizeFromTime(AUDIO_MIC_BUFFER_LENGTH) == AUDIO_MIC_BUFFER_SIZE);
  return Format();
}
// Timer callback (worker thread): pushes one silent buffer to the connection
// and updates the statistics counters.
void NullInputDevice::onTimerSignal(NullTimer& timer)
{
  mTimeCounter += AUDIO_MIC_BUFFER_LENGTH;
  mDataCounter += AUDIO_MIC_BUFFER_SIZE;
  if (mConnection)
    mConnection->onMicData(getFormat(), mBuffer, AUDIO_MIC_BUFFER_SIZE);
}
// --------------------- NullOutputDevice --------------------------
// --------------------- NullOutputDevice --------------------------
// Simulated speaker: pulls buffers from the connection at the real playback
// cadence and discards them, driven by a NullTimer thread.
NullOutputDevice::NullOutputDevice()
  :mBuffer(nullptr)
{
}
NullOutputDevice::~NullOutputDevice()
{
  close();
}
bool NullOutputDevice::open()
{
  mTimeCounter = 0; mDataCounter = 0;
  mBuffer = malloc(AUDIO_SPK_BUFFER_SIZE);
  // Creation of timer starts it also. So first onSpkData() can come before open() returns even.
  mTimer = std::make_shared<NullTimer>(AUDIO_SPK_BUFFER_LENGTH, this, "NullSpeakerThread");
  return true;
}
// Safe to call repeatedly; free(nullptr) is a no-op on the second call.
void NullOutputDevice::close()
{
  mTimer.reset();
  free(mBuffer); mBuffer = nullptr;
  ICELogInfo(<< "Pseudoplayed " << mTimeCounter << " milliseconds, " << mDataCounter << " bytes.");
}
Format NullOutputDevice::getFormat()
{
  assert (Format().sizeFromTime(AUDIO_SPK_BUFFER_LENGTH) == AUDIO_SPK_BUFFER_SIZE);
  return Format();
}
// Timer callback (worker thread): requests one buffer from the connection,
// then drops it.
void NullOutputDevice::onTimerSignal(NullTimer &timer)
{
  mTimeCounter += AUDIO_SPK_BUFFER_LENGTH;
  mDataCounter += AUDIO_SPK_BUFFER_SIZE;
  if (mConnection)
    mConnection->onSpkData(getFormat(), mBuffer, AUDIO_SPK_BUFFER_SIZE);
}
// ---------------------- NullEnumerator --------------------------
// ---------------------- NullEnumerator --------------------------
// Stub enumerator exposing exactly one pseudo-device named "null" (id 0).
NullEnumerator::NullEnumerator()
{}
NullEnumerator::~NullEnumerator()
{}
void NullEnumerator::open(int direction)
{}
void NullEnumerator::close()
{}
int NullEnumerator::count()
{
  return 1;
}
std::tstring NullEnumerator::nameAt(int index)
{
  // std::tstring is wide on Windows builds
#if defined(TARGET_WIN)
  return L"null";
#else
  return "null";
#endif
}
int NullEnumerator::idAt(int index)
{
  return 0;
}
int NullEnumerator::indexOfDefaultDevice()
{
  return 0;
}

View File

@@ -0,0 +1,86 @@
#ifndef __AUDIO_NULL_H
#define __AUDIO_NULL_H
#include <thread>
#include "Audio_Interface.h"
namespace Audio
{
// Thread-backed periodic timer used by the null (simulated) audio devices.
// Fires Delegate::onTimerSignal() from a worker thread every `interval` ms;
// the thread starts in the constructor and is joined in the destructor.
class NullTimer
{
public:
  class Delegate
  {
  public:
    // Invoked from the timer's worker thread
    virtual void onTimerSignal(NullTimer& timer) = 0;
  };
protected:
  std::thread mWorkerThread;
  volatile bool mShutdown;      // set by stop() to end the worker loop
  Delegate* mDelegate;
  int mInterval, // Interval - wanted number of milliseconds
      mTail; // Number of milliseconds that can be sent immediately to sink
  std::string mThreadName;
  void start();
  void stop();
  void run();
public:
  /* Interval is in milliseconds. */
  NullTimer(int interval, Delegate* delegate, const char* name = nullptr);
  ~NullTimer();
};
// Simulated microphone: a NullTimer pushes silent buffers at capture cadence.
class NullInputDevice: public InputDevice, public NullTimer::Delegate
{
protected:
  void* mBuffer = nullptr;                     // reusable zero-filled buffer
  std::shared_ptr<NullTimer> mTimer;
  int64_t mTimeCounter = 0, mDataCounter = 0;  // stats reported on close()
public:
  NullInputDevice();
  virtual ~NullInputDevice();
  bool open() override;
  void close() override;
  Format getFormat() override;
  void onTimerSignal(NullTimer& timer) override;
};
// Simulated speaker: a NullTimer pulls and discards buffers at playback cadence.
class NullOutputDevice: public OutputDevice, public NullTimer::Delegate
{
protected:
  std::shared_ptr<NullTimer> mTimer;
  void* mBuffer = nullptr;                     // scratch buffer filled by the connection
  int64_t mDataCounter = 0, mTimeCounter = 0;  // stats reported on close()
public:
  NullOutputDevice();
  virtual ~NullOutputDevice();
  bool open() override;
  void close() override;
  Format getFormat() override;
  void onTimerSignal(NullTimer& timer) override;
};
// Stub enumerator exposing one pseudo-device named "null".
class NullEnumerator: public Enumerator
{
public:
  NullEnumerator();
  ~NullEnumerator();
  void open(int direction) override;
  void close() override;
  int count() override;
  std::tstring nameAt(int index) override;
  int idAt(int index) override;
  int indexOfDefaultDevice() override;
};
}
#endif

View File

@@ -0,0 +1,167 @@
/* Copyright(C) 2007-2014 VoIP objects (voipobjects.com)
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "Audio_Player.h"
using namespace Audio;
// -------------- Player -----------
Player::Player()
:mDelegate(NULL), mPlayedTime(0)
{
}
Player::~Player()
{
}
void Player::setDelegate(EndOfAudioDelegate* d)
{
mDelegate = d;
}
Player::EndOfAudioDelegate* Player::getDelegate() const
{
return mDelegate;
}
// Gives the player a dedicated output device; the player registers itself as
// the device's data connection so onSpkData() is pulled directly.
void Player::setOutput(POutputDevice output)
{
  mOutput = output;
  if (mOutput)
    mOutput->setConnection(this);
}
POutputDevice Player::getOutput() const
{
  return mOutput;
}
void Player::onMicData(const Format& f, const void* buffer, int length)
{
  // Do nothing here - this data sink is not used in player
}
#define BYTES_PER_MILLISECOND (AUDIO_SAMPLERATE / 1000 * 2 * AUDIO_CHANNELS)
// Fills `buffer` with up to `length` bytes from the playlist head. Items are
// advanced when their time limit expires or their file is exhausted; looping
// items rewind instead. Called from the output device's thread.
//
// Fix: the original tested `wasread < length-produced` AFTER adding wasread to
// produced, which actually checks `2*wasread < requested` and so misdetected
// short reads. The end-of-data condition is simply `produced < length` after
// a read that requested `length-produced` bytes. Also guards against an
// infinite loop on an empty looping file (rewind + read yielding 0 bytes).
void Player::onSpkData(const Format& f, void* buffer, int length)
{
  Lock l(mGuard);
  // Fill buffer by zero if player owns dedicated device
  if (mOutput)
    memset(buffer, 0, length);
  // See if there is item in playlist
  int produced = 0;
  while (mPlaylist.size() && produced < length)
  {
    PlaylistItem& item = mPlaylist.front();
    // Check for timelength
    if (item.mTimelength > 0 && item.mTimelength < mPlayedTime)
    {
      onFilePlayed();
      continue;
    }
    int wasread = item.mFile->read((char*)buffer+produced, length-produced);
    mPlayedTime += float(wasread) / BYTES_PER_MILLISECOND;
    produced += wasread;
    if (produced < length) // short read - the file is exhausted
    {
      if (item.mLoop)
      {
        item.mFile->rewind();
        wasread = item.mFile->read((char*)buffer+produced, length-produced);
        mPlayedTime += float(wasread) / BYTES_PER_MILLISECOND;
        produced += wasread;
        if (!wasread) // empty file even after rewind - drop it to avoid spinning
          onFilePlayed();
      }
      else
        onFilePlayed();
    }
  }
}
// Finishes the head playlist item: records its usage id for deferred
// release, notifies the delegate, pops the item and resets the play timer.
// NOTE(review): assumes mPlaylist is non-empty and mGuard is already held
// by the caller - confirm at every call site.
void Player::onFilePlayed()
{
// Save usage id to release later from main loop
mFinishedUsages.push_back(mPlaylist.front().mUsageId);
// Send event
if (mDelegate)
mDelegate->onFilePlayed(mPlaylist.front());
// Remove played item & reset played time
mPlaylist.pop_front();
mPlayedTime = 0;
}
// Increments the reference counter for 'usage' and opens the attached
// output device when the very first reference appears.
void Player::obtain(int usage)
{
Lock l(mGuard);
// Open only on the empty -> non-empty transition. The original tested
// mUsage.size() == 1 after the update, which re-opened the device on
// every obtain() while a single id was being tracked.
bool wasEmpty = mUsage.empty();
UsageMap::iterator usageIter = mUsage.find(usage);
if (usageIter == mUsage.end())
mUsage[usage] = 1;
else
usageIter->second = usageIter->second + 1;
if (wasEmpty && mOutput)
mOutput->open();
}
void Player::release(int usage)
{
Lock l(mGuard);
UsageMap::iterator usageIter = mUsage.find(usage);
if (usageIter == mUsage.end())
return;
usageIter->second = usageIter->second - 1;
if (!usageIter->second)
mUsage.erase(usageIter);
for (unsigned i=0; i<mPlaylist.size(); i++)
if (mPlaylist[i].mUsageId == usage)
mPlaylist.erase(mPlaylist.begin() + i);
if (mUsage.empty() && mOutput)
mOutput->close();
}
// Releases every usage whose playback has finished; returns how many.
// NOTE(review): release() acquires mGuard again while it is already held
// here - this deadlocks unless Mutex is recursive; confirm HL_Sync semantics.
int Player::releasePlayed()
{
Lock l(mGuard);
int result = mFinishedUsages.size();
while (mFinishedUsages.size())
{
release(mFinishedUsages.front());
mFinishedUsages.erase(mFinishedUsages.begin());
}
return result;
}
// Queues a file for playback and takes a reference on 'usageId'.
// @param loop        restart the file from the beginning at end-of-file.
// @param timelength  maximum play time in milliseconds; 0 = unlimited.
// NOTE(review): obtain() re-locks mGuard - same recursion assumption as above.
void Player::add(int usageId, PWavFileReader file, bool loop, int timelength)
{
Lock l(mGuard);
PlaylistItem item;
item.mFile = file;
item.mLoop = loop;
item.mTimelength = timelength;
item.mUsageId = usageId;
mPlaylist.push_back(item);
obtain(usageId);
}
// Finishes every queued item; the delegate is notified for each one.
void Player::clear()
{
Lock l(mGuard);
while (mPlaylist.size())
onFilePlayed();
}
void Player::retrieveUsageIds(std::vector<int>& ids)
{
ids.assign(mFinishedUsages.begin(), mFinishedUsages.end());
mFinishedUsages.clear();
}

View File

@@ -0,0 +1,67 @@
/* Copyright(C) 2007-2014 VoIP objects (voipobjects.com)
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef __AUDIO_PLAYER_H
#define __AUDIO_PLAYER_H
#include "../helper/HL_Log.h"
#include "../helper/HL_Sync.h"
#include "Audio_Interface.h"
#include <deque>
#include <map>
#include <vector>
namespace Audio
{
// Plays queued .wav files through an output device. Items are played
// sequentially; usage ids are reference-counted so callers can share the
// player and release their items independently.
class Player: public DataConnection
{
friend class DevicePair;
public:
// Single queued audio file together with its playback options.
struct PlaylistItem
{
PWavFileReader mFile;   // Source .wav reader.
bool mLoop;             // Restart from the beginning at end-of-file.
int mTimelength;        // Maximum play time in milliseconds, 0 = unlimited.
int mUsageId;           // Caller's reference id.
};
typedef std::deque<PlaylistItem> Playlist;
// Callback interface fired when a playlist item finishes playing.
class EndOfAudioDelegate
{
public:
virtual void onFilePlayed(PlaylistItem& item) = 0;
};
protected:
typedef std::map<int, int> UsageMap;
Audio::POutputDevice mOutput;
UsageMap mUsage; // References map
std::vector<int> mFinishedUsages; // Finished plays
Mutex mGuard;                     // Protects playlist/usage state.
Playlist mPlaylist;
float mPlayedTime;                // Play time of the head item (milliseconds).
EndOfAudioDelegate* mDelegate;
// DataConnection sink for microphone data; unused by the player.
void onMicData(const Format& f, const void* buffer, int length);
// DataConnection source for speaker data; feeds playlist audio.
void onSpkData(const Format& f, void* buffer, int length);
void onFilePlayed();
void scheduleRelease();
void obtain(int usageId);
public:
Player();
~Player();
void setDelegate(EndOfAudioDelegate* d);
EndOfAudioDelegate* getDelegate() const;
void setOutput(POutputDevice output);
POutputDevice getOutput() const;
void add(int usageId, PWavFileReader file, bool loop, int timelength);
void release(int usageId);
void clear();
int releasePlayed();
void retrieveUsageIds(std::vector<int>& ids);
};
}
#endif

View File

@@ -0,0 +1,235 @@
/* Copyright(C) 2007-2014 VoIP objects (voipobjects.com)
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "../config.h"
#include "Audio_Quality.h"
#include "../helper/HL_Exception.h"
#include "../helper/HL_Types.h"
#include "speex/speex_preprocess.h"
#ifdef WIN32
# include <malloc.h>
#endif
#include <assert.h>
#include <string.h>
using namespace Audio;
#define SHRT_MAX 32767 /* maximum (signed) short value */
// Creates per-channel AGC state. The target peak is DefaultLevel (80%) of
// full scale; gains are 16.16 fixed point (65536 == unity).
AgcFilter::AgcFilter(int channels)
{
static const float DefaultLevel = 0.8f;
for (int i=0; i<channels; i++)
{
Channel c;
float level = DefaultLevel;
c.mSampleMax = 1;
c.mCounter = 0;
c.mIgain = 65536; // Unity gain in 16.16 fixed point.
if (level > 1.0f)
level = 1.0f;
else
if (level < 0.5f)
level = 0.5f;
c.mIpeak = (int)(SHRT_MAX * level * 65536);
c.mSilenceCounter = 0;
mChannelList.push_back(c);
}
}
AgcFilter::~AgcFilter()
{
}
// Applies AGC in place to interleaved 16-bit PCM.
// @param length buffer size in bytes (covers all channels).
void AgcFilter::process(void *pcm, int length)
{
for (size_t i=0; i<mChannelList.size(); i++)
processChannel((short*)pcm, length / (sizeof(short) * mChannelList.size()), i);
}
// Adaptive gain control for one channel of interleaved PCM. Tracks the
// per-interval peak and steers the fixed-point gain toward mIpeak without
// clipping; gain is re-evaluated 10x per second and slowly reduced during
// long silence. 62259/65536 is approximately 0.95, i.e. a 5% back-off.
void AgcFilter::processChannel(short* pcm, int nrOfSamples, int channelIndex)
{
int i;
for(i=0; i<nrOfSamples; i++)
{
long gain_new;
int sample;
int sampleIndex = mChannelList.size() * i + channelIndex;
Channel& channel = mChannelList[channelIndex];
/* get the abs of buffer[i] */
sample = pcm[sampleIndex];
sample = (sample < 0 ? -(sample):sample);
if(sample > (int)channel.mSampleMax)
{
/* update the max */
channel.mSampleMax = (unsigned int)sample;
}
channel.mCounter ++;
/* Will we get an overflow with the current gain factor? */
if (((sample * channel.mIgain) >> 16) > channel.mIpeak)
{
/* Yes: Calculate new gain. */
channel.mIgain = ((channel.mIpeak / channel.mSampleMax) * 62259) >> 16;
channel.mSilenceCounter = 0;
pcm[sampleIndex] = (short) ((pcm[sampleIndex] * channel.mIgain) >> 16);
continue;
}
/* Calculate new gain factor 10x per second */
if (channel.mCounter >= AUDIO_SAMPLERATE / 10)
{
if (channel.mSampleMax > AUDIO_SAMPLERATE / 10) /* speaking? */
{
gain_new = ((channel.mIpeak / channel.mSampleMax) * 62259) >> 16;
if (channel.mSilenceCounter > 40) /* pause -> speaking */
channel.mIgain += (gain_new - channel.mIgain) >> 2;
else
channel.mIgain += (gain_new - channel.mIgain) / 20;
channel.mSilenceCounter = 0;
}
else /* silence */
{
channel.mSilenceCounter++;
/* silence > 2 seconds: reduce gain */
if ((channel.mIgain > 65536) && (channel.mSilenceCounter >= 20))
channel.mIgain = (channel.mIgain * 62259) >> 16;
}
channel.mCounter = 0;
channel.mSampleMax = 1;
}
pcm[sampleIndex] = (short) ((pcm[sampleIndex] * channel.mIgain) >> 16);
}
}
// --- AecFilter ---
#ifdef USE_SPEEX_AEC
# include "speex/speex_echo.h"
#include "Audio_Interface.h"
#if !defined(TARGET_WIN)
# include <alloca.h>
#endif
#endif
#ifdef USE_WEBRTC_AEC
# include "aec/echo_cancellation.h"
#endif
#ifdef USE_WEBRTC_AEC
static void CheckWRACode(unsigned errorcode)
{
if (errorcode)
throw Exception(ERR_WEBRTC, errorcode);
}
#endif
// Creates an echo canceller context (Speex or WebRTC depending on build flags).
// @param tailTime  echo tail length in milliseconds.
// @param frameTime single frame duration in milliseconds.
// @param rate      sampling rate in Hz.
AecFilter::AecFilter(int tailTime, int frameTime, int rate)
:mCtx(nullptr), mFrameTime(frameTime), mRate(rate)
{
#ifdef USE_SPEEX_AEC
if (AUDIO_CHANNELS == 2)
mCtx = speex_echo_state_init_mc(frameTime * (mRate / 1000), tailTime * (mRate / 1000), AUDIO_CHANNELS, AUDIO_CHANNELS );
else
mCtx = speex_echo_state_init(frameTime * (mRate / 1000), tailTime * (mRate / 1000));
int tmp = rate;
speex_echo_ctl((SpeexEchoState*)mCtx, SPEEX_ECHO_SET_SAMPLING_RATE, &tmp);
#endif
#ifdef USE_WEBRTC_AEC
CheckWRACode(WebRtcAec_Create(&mCtx));
CheckWRACode(WebRtcAec_Init(mCtx, rate, rate));
#endif
}
AecFilter::~AecFilter()
{
#ifdef USE_SPEEX_AEC
if (mCtx)
{
// NOTE(review): the destroy call is commented out, so the Speex echo
// state is leaked on destruction - confirm whether this was a crash
// workaround before re-enabling.
//speex_echo_state_destroy((SpeexEchoState*)mCtx);
mCtx = nullptr;
}
#endif
#ifdef USE_WEBRTC_AEC
CheckWRACode(WebRtcAec_Free(mCtx));
mCtx = NULL;
#endif
}
// Feeds one captured (near-end) frame; echo-cancelled in place.
// NOTE(review): the USE_WEBRTC_AEC branch references 'framesize', 'i' and
// ALLOCA which are not declared here - it cannot compile as written.
void AecFilter::fromMic(void *data)
{
#ifdef USE_SPEEX_AEC
short* output = (short*)alloca(Format().sizeFromTime(AUDIO_MIC_BUFFER_LENGTH));
speex_echo_capture((SpeexEchoState*)mCtx, (short*)data, (short*)output);
memmove(data, output, AUDIO_MIC_BUFFER_SIZE);
#endif
#ifdef USE_WEBRTC_AEC
short* inputframe = (short*)ALLOCA(framesize);
memcpy(inputframe, (char*)data+framesize*i, framesize);
CheckWRACode(WebRtcAec_Process(mCtx, (short*)inputframe, NULL, (short*)data+framesize/2*i, NULL, mFrameTime * mRate / 1000, 0,0));
#endif
}
// Feeds one played (far-end) frame used as the echo reference.
// NOTE(review): the USE_WEBRTC_AEC branch references an undeclared 'length'.
void AecFilter::toSpeaker(void *data)
{
#ifdef USE_SPEEX_AEC
speex_echo_playback((SpeexEchoState*)mCtx, (short*)data);
#endif
#ifdef USE_WEBRTC_AEC
CheckWRACode(WebRtcAec_BufferFarend(mCtx, (short*)data, length / 2 / AUDIO_CHANNELS));
#endif
}
int AecFilter::frametime()
{
return mFrameTime;
}
// Creates a Speex preprocessor sized for 10-ms frames at 'rate' Hz.
DenoiseFilter::DenoiseFilter(int rate)
:mRate(rate)
{
mCtx = speex_preprocess_state_init(mRate/100, mRate);
}
DenoiseFilter::~DenoiseFilter()
{
if (mCtx)
speex_preprocess_state_destroy((SpeexPreprocessState*)mCtx);
}
// Denoises captured audio in place.
// @param timelength buffer duration in milliseconds; must be a multiple of 10.
void DenoiseFilter::fromMic(void* data, int timelength)
{
assert(timelength % 10 == 0);
// Process by 10-ms blocks
spx_int16_t* in = (spx_int16_t*)data;
for (int blockIndex=0; blockIndex<timelength/10; blockIndex++)
{
spx_int16_t* block = in + blockIndex * (mRate / 100) * AUDIO_CHANNELS;
speex_preprocess_run((SpeexPreprocessState*)mCtx, block);
}
}
int DenoiseFilter::rate()
{
return mRate;
}

View File

@@ -0,0 +1,69 @@
/* Copyright(C) 2007-2014 VoIP objects (voipobjects.com)
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef __AUDIO_QUALITY_H
#define __AUDIO_QUALITY_H
#include "../config.h"
#include "../helper/HL_Sync.h"
#include <vector>
namespace Audio
{
// Automatic gain control for interleaved 16-bit PCM; one state per channel.
class AgcFilter
{
protected:
// Per-channel adaptation state (gains are 16.16 fixed point).
struct Channel
{
unsigned int mSampleMax;  // Peak absolute sample in the current interval.
int mCounter;             // Samples seen since the last gain update.
long mIgain;              // Current gain, 65536 == unity.
int mIpeak;               // Target peak (scaled by 65536).
int mSilenceCounter;      // Consecutive silent intervals.
};
std::vector<Channel> mChannelList;
void processChannel(short* pcm, int nrOfSamples, int channelIndex);
public:
AgcFilter(int channels);
~AgcFilter();
void process(void* pcm, int length);
};
// Acoustic echo canceller (Speex or WebRTC backend, chosen at build time).
class AecFilter
{
public:
AecFilter(int tailTime, int frameTime, int rate);
~AecFilter();
// These methods accept input block with timelength "frameTime" used in constructor.
void toSpeaker(void* data);
void fromMic(void* data);
int frametime();
protected:
void* mCtx; /// The echo canceller context's pointer.
Mutex mGuard; /// Mutex to protect this instance.
int mFrameTime; /// Duration of single audio frame (in milliseconds)
int mRate; /// Sampling rate in Hz.
};
// Speex-based noise suppressor operating on 10-ms blocks.
class DenoiseFilter
{
public:
DenoiseFilter(int rate);
~DenoiseFilter();
void fromMic(void* data, int timelength);
int rate();
protected:
Mutex mGuard; /// Mutex to protect this instance.
void* mCtx; /// The denoiser context pointer.
int mRate; /// Sampling rate in Hz.
};
}
#endif

View File

@@ -0,0 +1,268 @@
/* Copyright(C) 2007-2014 VoIP objects (voipobjects.com)
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "../config.h"
#include "Audio_Resampler.h"
#include <stdlib.h>
#include <assert.h>
#include <memory.h>
#include <algorithm>
#include "speex/speex_resampler.h"
using namespace Audio;
#define IS_FRACTIONAL_RATE(X) (((X) % 8000) != 0)
// Constructs an unconfigured resampler; start() must be called before use.
SpeexResampler::SpeexResampler()
:mContext(NULL), mErrorCode(0), mSourceRate(0), mDestRate(0), mLastSample(0)
{
}
// Configures the conversion rates. The Speex context itself is created
// lazily on the first processBuffer() call.
void SpeexResampler::start(int channels, int sourceRate, int destRate)
{
if (mSourceRate == sourceRate && mDestRate == destRate && mContext)
return;
if (mContext)
stop();
mSourceRate = sourceRate;
mDestRate = destRate;
mChannels = channels;
if (sourceRate != destRate)
{
// Defer context creation until first request
//mContext = speex_resampler_init(channels, sourceRate, destRate, AUDIO_RESAMPLER_QUALITY, &mErrorCode);
//assert(mContext != NULL);
}
}
// Destroys the Speex context; safe to call when not started.
void SpeexResampler::stop()
{
if (mContext)
{
speex_resampler_destroy((SpeexResamplerState*)mContext);
mContext = NULL;
}
}
SpeexResampler::~SpeexResampler()
{
stop();
}
// Resamples interleaved 16-bit PCM.
// @param sourceLength     input size in bytes.
// @param sourceProcessed  out: how many input bytes were consumed.
// @param destCapacity     output buffer size in bytes.
// @return number of output bytes produced (0 on skip/failure).
int SpeexResampler::processBuffer(const void* src, int sourceLength, int& sourceProcessed, void* dest, int destCapacity)
{
assert(mSourceRate != 0 && mDestRate != 0);
// Pass-through when no rate change is needed.
if (mDestRate == mSourceRate)
{
assert(destCapacity >= sourceLength);
memcpy(dest, src, (size_t)sourceLength);
sourceProcessed = sourceLength;
return sourceLength;
}
// Lazy creation of the Speex context (deferred from start()).
if (!mContext)
{
mContext = speex_resampler_init(mChannels, mSourceRate, mDestRate, AUDIO_RESAMPLER_QUALITY, &mErrorCode);
if (!mContext)
return 0;
}
// Check if there is zero samples passed
if (sourceLength / (sizeof(short) * mChannels) == 0)
{
// Consume all data
sourceProcessed = sourceLength;
// But no output
return 0;
}
unsigned outLen = getDestLength(sourceLength);
if (outLen > (unsigned)destCapacity)
return 0; // Skip resampling if not enough space
assert((unsigned)destCapacity >= outLen);
// Calculate number of samples - input length is in bytes
unsigned inLen = sourceLength / (sizeof(short) * mChannels);
outLen /= sizeof(short) * mChannels;
assert(mContext != NULL);
int speexCode = speex_resampler_process_interleaved_int((SpeexResamplerState *)mContext, (spx_int16_t*)src, &inLen,
(spx_int16_t*)dest, &outLen);
assert(speexCode == RESAMPLER_ERR_SUCCESS);
// Return results in bytes
sourceProcessed = inLen * sizeof(short) * mChannels;
return outLen * sizeof(short) * mChannels;
}
int SpeexResampler::sourceRate()
{
return mSourceRate;
}
int SpeexResampler::destRate()
{
return mDestRate;
}
// Converts a source byte count to the expected output byte count,
// rounded to an even number of bytes (whole 16-bit samples).
int SpeexResampler::getDestLength(int sourceLen)
{
return int(sourceLen * (float(mDestRate) / mSourceRate) + 0.5) / 2 * 2;
}
// Converts a destination byte count to the required input byte count.
int SpeexResampler::getSourceLength(int destLen)
{
return int(destLen * (float(mSourceRate) / mDestRate) + 0.5) / 2 * 2;
}
// Returns instance + speex resampler size in bytes
int SpeexResampler::getSize() const
{
return sizeof(*this) + 200; // 200 is approximate size of speex resample structure
}
// -------------------------- ChannelConverter --------------------
// Downmixes interleaved 16-bit stereo to mono by averaging each L/R pair.
// Lengths are in bytes; returns the number of bytes written to 'dest'.
int ChannelConverter::stereoToMono(const void *source, int sourceLength, void *dest, int destLength)
{
assert(destLength == sourceLength / 2);
const short* in = (const short*)source;
short* out = (short*)dest;
const int samples = destLength / 2;
for (int s = 0; s < samples; ++s)
out[s] = (in[2*s] + in[2*s+1]) >> 1;
return sourceLength / 2;
}
// Expands 16-bit mono to interleaved stereo by duplicating every sample.
// Runs back-to-front so 'source' and 'dest' may be the same buffer.
int ChannelConverter::monoToStereo(const void *source, int sourceLength, void *dest, int destLength)
{
assert(destLength == sourceLength * 2);
const short* in = (const short*)source;
short* out = (short*)dest;
for (int s = sourceLength/2 - 1; s >= 0; --s)
{
const short v = in[s];
out[2*s] = v;
out[2*s+1] = v;
}
return sourceLength * 2;
}
Resampler48kTo16k::Resampler48kTo16k()
{
WebRtcSpl_ResetResample48khzTo16khz(&mContext);
}
Resampler48kTo16k::~Resampler48kTo16k()
{
WebRtcSpl_ResetResample48khzTo16khz(&mContext);
}
// Downsamples 48 kHz mono 16-bit PCM to 16 kHz in 10-ms (480-sample) frames.
// Lengths are in bytes; returns the produced byte count (sourceLen / 3).
// NOTE(review): destLen is not validated - caller must size dest accordingly.
int Resampler48kTo16k::process(const void *source, int sourceLen, void *dest, int destLen)
{
const short* input = (const short*)source; int inputLen = sourceLen / 2;
short* output = (short*)dest; //int outputCapacity = destLen / 2;
assert(inputLen % 480 == 0);
int frames = inputLen / 480;
for (int i=0; i<frames; i++)
WebRtcSpl_Resample48khzTo16khz(input + i * 480, output + i * 160, &mContext, mTemp);
return sourceLen / 3;
}
Resampler16kto48k::Resampler16kto48k()
{
WebRtcSpl_ResetResample16khzTo48khz(&mContext);
}
Resampler16kto48k::~Resampler16kto48k()
{
WebRtcSpl_ResetResample16khzTo48khz(&mContext);
}
// Upsamples 16 kHz mono 16-bit PCM to 48 kHz in 10-ms (160-sample) frames.
// Lengths are in bytes; returns the produced byte count (sourceLen * 3).
int Resampler16kto48k::process(const void *source, int sourceLen, void *dest, int destLen)
{
const WebRtc_Word16* input = (const WebRtc_Word16*)source; int inputLen = sourceLen / 2;
WebRtc_Word16* output = (WebRtc_Word16*)dest; //int outputCapacity = destLen / 2;
assert(inputLen % 160 == 0);
int frames = inputLen / 160;
for (int i=0; i<frames; i++)
WebRtcSpl_Resample16khzTo48khz(input + i * 160, output + i * 480, &mContext, mTemp);
return sourceLen * 3;
}
// ---------------- UniversalResampler -------------------
UniversalResampler::UniversalResampler()
{
}
UniversalResampler::~UniversalResampler()
{
}
// Resamples between arbitrary rates, caching one Resampler per rate pair.
// Pass-through (memcpy) when the rates match. Lengths are in bytes.
int UniversalResampler::resample(int sourceRate, const void *sourceBuffer, int sourceLength, int& sourceProcessed, int destRate, void *destBuffer, int destCapacity)
{
assert(destBuffer && sourceBuffer);
int result;
if (sourceRate == destRate)
{
assert(destCapacity >= sourceLength);
memcpy(destBuffer, sourceBuffer, (size_t)sourceLength);
sourceProcessed = sourceLength;
result = sourceLength;
}
else
{
PResampler r = findResampler(sourceRate, destRate);
result = r->processBuffer(sourceBuffer, sourceLength, sourceProcessed, destBuffer, destCapacity);
}
return result;
}
void UniversalResampler::preload()
{
}
// Expected output byte count for the given input byte count.
int UniversalResampler::getDestLength(int sourceRate, int destRate, int sourceLength)
{
if (sourceRate == destRate)
return sourceLength;
else
return findResampler(sourceRate, destRate)->getDestLength(sourceLength);
}
// Required input byte count for the given output byte count.
int UniversalResampler::getSourceLength(int sourceRate, int destRate, int destLength)
{
if (sourceRate == destRate)
return destLength;
else
return findResampler(sourceRate, destRate)->getSourceLength(destLength);
}
// Returns the cached resampler for (sourceRate, destRate), creating and
// caching a new one on first use.
PResampler UniversalResampler::findResampler(int sourceRate, int destRate)
{
assert(sourceRate != destRate);
ResamplerMap::iterator resamplerIter = mResamplerMap.find(RatePair(sourceRate, destRate));
PResampler r;
if (resamplerIter == mResamplerMap.end())
{
r = PResampler(new Resampler());
r->start(AUDIO_CHANNELS, sourceRate, destRate);
mResamplerMap[RatePair(sourceRate, destRate)] = r;
}
else
r = resamplerIter->second;
return r;
}

View File

@@ -0,0 +1,97 @@
/* Copyright(C) 2007-2014 VoIP objects (voipobjects.com)
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef __AUDIO_RESAMPLER_H
#define __AUDIO_RESAMPLER_H
#include "signal_processing_library/signal_processing_library.h"
#include "../helper/HL_Pointer.h"
#include <map>
#include <memory>
#include <vector>
namespace Audio
{
// Speex-backed sample-rate converter for interleaved 16-bit PCM.
// All buffer lengths in the API are expressed in bytes.
class SpeexResampler
{
public:
SpeexResampler();
~SpeexResampler();
void start(int channels, int sourceRate, int destRate);
void stop();
int processBuffer(const void* source, int sourceLength, int& sourceProcessed, void* dest, int destCapacity);
int sourceRate();
int destRate();
int getDestLength(int sourceLen);
int getSourceLength(int destLen);
// Returns instance + speex encoder size in bytes
int getSize() const;
protected:
void* mContext;       // Lazily created SpeexResamplerState.
int mErrorCode;       // Last Speex init error code.
int mSourceRate,
mDestRate,
mChannels;
short mLastSample;
};
typedef SpeexResampler Resampler;
typedef std::shared_ptr<Resampler> PResampler;
// Static helpers converting between mono and interleaved stereo PCM.
class ChannelConverter
{
public:
static int stereoToMono(const void* source, int sourceLength, void* dest, int destLength);
static int monoToStereo(const void* source, int sourceLength, void* dest, int destLength);
};
// Operates with AUDIO_CHANNELS number of channels
// Caches one Resampler per (sourceRate, destRate) pair.
class UniversalResampler
{
public:
UniversalResampler();
~UniversalResampler();
int resample(int sourceRate, const void* sourceBuffer, int sourceLength, int& sourceProcessed, int destRate, void* destBuffer, int destCapacity);
int getDestLength(int sourceRate, int destRate, int sourceLength);
int getSourceLength(int sourceRate, int destRate, int destLength);
protected:
typedef std::pair<int, int> RatePair;
typedef std::map<RatePair, PResampler> ResamplerMap;
ResamplerMap mResamplerMap;
PResampler findResampler(int sourceRate, int destRate);
void preload();
};
// n*10 milliseconds buffers required!
// Fixed-rate WebRTC downsampler (48 kHz -> 16 kHz, mono).
class Resampler48kTo16k
{
public:
Resampler48kTo16k();
~Resampler48kTo16k();
int process(const void* source, int sourceLen, void* dest, int destLen);
protected:
WebRtc_Word32 mTemp[496];   // Scratch buffer required by WebRtcSpl.
WebRtcSpl_State48khzTo16khz mContext;
};
// Fixed-rate WebRTC upsampler (16 kHz -> 48 kHz, mono).
class Resampler16kto48k
{
public:
Resampler16kto48k();
~Resampler16kto48k();
int process(const void* source, int sourceLen, void* dest, int destLen);
protected:
WebRtc_Word32 mTemp[336];   // Scratch buffer required by WebRtcSpl.
WebRtcSpl_State16khzTo48khz mContext;
};
}
#endif

View File

@@ -0,0 +1,374 @@
/* Copyright(C) 2007-2017 VoIPobjects (voipobjects.com)
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "Audio_WavFile.h"
#include "helper/HL_Exception.h"
#include "helper/HL_String.h"
#include "helper/HL_Log.h"
#include "../config.h"
#include <memory.h>
#ifndef WORD
# define WORD unsigned short
#endif
#ifndef DWORD
# define DWORD unsigned int
#endif
typedef struct {
WORD wFormatTag;
WORD nChannels;
DWORD nSamplesPerSec;
DWORD nAvgBytesPerSec;
WORD nBlockAlign;
WORD wBitsPerSample;
WORD cbSize;
}WAVEFORMATEX;
#define WAVE_FORMAT_PCM 1
#define LOG_SUBSYSTEM "WavFileReader"
using namespace Audio;
// ---------------------- WavFileReader -------------------------
// Constructs a closed reader with all format fields zeroed.
WavFileReader::WavFileReader()
:mHandle(NULL), mRate(0)
{
mDataOffset = 0;
// Initialize the remaining format members: the original left these
// indeterminate until a successful open().
mDataLength = 0;
mChannels = 0;
mBits = 0;
}
WavFileReader::~WavFileReader()
{
// Close the file if the caller did not; the original destructor leaked
// the open FILE handle.
close();
}
#define THROW_READERROR throw Exception(ERR_WAVFILE_FAILED);
// Reads one RIFF chunk header and returns its 4-character id.
// "fact" chunks update mDataLength, "data" records its size, anything
// else is skipped. Throws ERR_WAVFILE_FAILED on read failure.
std::string WavFileReader::readChunk()
{
char name[5];
if (fread(name, 1, 4, mHandle) != 4)
THROW_READERROR;
name[4] = 0;
std::string result = name;
unsigned size;
if (fread(&size, 4, 1, mHandle) != 1)
THROW_READERROR;
if (result == "fact")
fread(&mDataLength, 4, 1, mHandle); // NOTE(review): result unchecked.
else
if (result != "data")
fseek(mHandle, size, SEEK_CUR);
else
mDataLength = size;
return result;
}
// Opens a PCM .wav file, parses the RIFF header and positions the stream
// at the start of the "data" chunk. Returns false on any failure (parse
// errors are caught internally and close the handle).
// NOTE(review): assumes a little-endian host - header fields are fread
// directly into integers.
bool WavFileReader::open(const std::tstring& filename)
{
Lock lock(mFileMtx);
try
{
#ifdef WIN32
mHandle = _wfopen(filename.c_str(), L"rb");
#else
mHandle = fopen(StringHelper::makeUtf8(filename).c_str(), "rb");
#endif
if (NULL == mHandle)
return false;
// Read the .WAV header
char riff[4];
if (fread(riff, 4, 1, mHandle) < 1)
THROW_READERROR;
if (!(riff[0] == 'R' && riff[1] == 'I' && riff[2] == 'F' && riff[3] == 'F'))
THROW_READERROR;
// Read the file size
unsigned int filesize = 0;
if (fread(&filesize, 4, 1, mHandle) < 1)
THROW_READERROR;
char wavefmt[9];
if (fread(wavefmt, 8, 1, mHandle) < 1)
THROW_READERROR;
wavefmt[8] = 0;
if (strcmp(wavefmt, "WAVEfmt ") != 0)
THROW_READERROR;
unsigned fmtSize = 0;
if (fread(&fmtSize, 4, 1, mHandle) < 1)
THROW_READERROR;
unsigned fmtStart = ftell(mHandle);
unsigned short formattag = 0;
if (fread(&formattag, 2, 1, mHandle) < 1)
THROW_READERROR;
if (formattag != 1/*WAVE_FORMAT_PCM*/)
THROW_READERROR;
mChannels = 0;
if (fread(&mChannels, 2, 1, mHandle) < 1)
THROW_READERROR;
mRate = 0;
if (fread(&mRate, 4, 1, mHandle) < 1)
THROW_READERROR;
unsigned int avgbytespersec = 0;
if (fread(&avgbytespersec, 4, 1, mHandle) < 1)
THROW_READERROR;
unsigned short blockalign = 0;
if (fread(&blockalign, 2, 1, mHandle) < 1)
THROW_READERROR;
mBits = 0;
if (fread(&mBits, 2, 1, mHandle) < 1)
THROW_READERROR;
if (mBits !=8 && mBits != 16)
THROW_READERROR;
// Skip the remainder of the fmt chunk, then scan chunks until "data".
fseek(mHandle, fmtStart + fmtSize, SEEK_SET);
//unsigned pos = ftell(mHandle);
mDataLength = 0;
while (readChunk() != "data")
;
mFileName = filename;
mDataOffset = ftell(mHandle);
// Prepare conversion from the file's rate to the engine rate.
mResampler.start(AUDIO_CHANNELS, mRate, AUDIO_SAMPLERATE);
}
catch(...)
{
fclose(mHandle); mHandle = NULL;
}
return isOpened();
}
void WavFileReader::close()
{
Lock lock(mFileMtx);
if (NULL != mHandle)
fclose(mHandle);
mHandle = NULL;
}
// Sampling rate of the opened file in Hz (0 when closed).
int WavFileReader::rate() const
{
return mRate;
}
// Reads and resamples audio; returns the number of bytes produced.
unsigned WavFileReader::read(void* buffer, unsigned bytes)
{
return read((short*)buffer, bytes / (AUDIO_CHANNELS * 2)) * AUDIO_CHANNELS * 2;
}
// Reads 'samples' frames at the engine rate, resampling from the file's
// native rate; returns the number of frames produced.
unsigned WavFileReader::read(short* buffer, unsigned samples)
{
Lock lock(mFileMtx);
if (!mHandle)
return 0;
// Get number of samples that must be read from source file
int requiredBytes = mResampler.getSourceLength(samples) * mChannels * mBits / 8;
void* temp = alloca(requiredBytes);
memset(temp, 0, requiredBytes);
// Clamp the request to what remains of the data chunk.
if (mDataLength)
{
unsigned filePosition = ftell(mHandle);
// Check how much data we can read
unsigned fileAvailable = mDataLength + mDataOffset - filePosition;
requiredBytes = (int)fileAvailable < requiredBytes ? (int)fileAvailable : requiredBytes;
}
/*int readSamples = */fread(temp, 1, requiredBytes, mHandle);// / mChannels / (mBits / 8);
int processedBytes = 0;
int result = mResampler.processBuffer(temp, requiredBytes, processedBytes, buffer, samples * 2 * AUDIO_CHANNELS);
return result / 2 / AUDIO_CHANNELS;
}
bool WavFileReader::isOpened()
{
Lock lock(mFileMtx);
return (mHandle != 0);
}
// Seeks back to the start of the data chunk.
void WavFileReader::rewind()
{
Lock l(mFileMtx);
if (mHandle)
fseek(mHandle, mDataOffset, SEEK_SET);
}
std::tstring WavFileReader::filename() const
{
Lock lock(mFileMtx);
return mFileName;
}
// Size of the data chunk in bytes.
unsigned WavFileReader::size() const
{
Lock l(mFileMtx);
return mDataLength;
}
// ------------------------- WavFileWriter -------------------------
#define LOG_SUBSYTEM "WavFileWriter"
#define BITS_PER_CHANNEL 16
// Constructs a closed writer with engine-default rate and mono output.
WavFileWriter::WavFileWriter()
:mHandle(NULL), mLengthOffset(0), mRate(AUDIO_SAMPLERATE), mChannels(1)
{
}
WavFileWriter::~WavFileWriter()
{
close();
}
// Throws ERR_WAVFILE_FAILED (with errno) when an fwrite result signals failure.
void WavFileWriter::checkWriteResult(int result)
{
if (result < 1)
throw Exception(ERR_WAVFILE_FAILED, errno);
}
// Creates a .wav file and writes a 16-bit PCM header. The RIFF and data
// size fields are written as zero here and patched by write().
// Fields are written one by one (not the whole struct) to avoid padding.
// NOTE(review): assumes a little-endian host.
bool WavFileWriter::open(const std::tstring& filename, int rate, int channels)
{
Lock lock(mFileMtx);
close();
mRate = rate;
mChannels = channels;
#ifdef WIN32
mHandle = _wfopen(filename.c_str(), L"wb");
#else
mHandle = fopen(StringHelper::makeUtf8(filename).c_str(), "wb");
#endif
if (NULL == mHandle)
{
ICELogCritical(<< "Failed to create .wav file: filename = " << StringHelper::makeUtf8(filename) << " , error = " << errno);
return false;
}
// Write the .WAV header
const char* riff = "RIFF";
checkWriteResult( fwrite(riff, 4, 1, mHandle) );
// Write the file size
unsigned int filesize = 0;
checkWriteResult( fwrite(&filesize, 4, 1, mHandle) );
const char* wavefmt = "WAVEfmt ";
checkWriteResult( fwrite(wavefmt, 8, 1, mHandle) );
// Set the format description
DWORD dwFmtSize = 16; /*= 16L*/;
checkWriteResult( fwrite(&dwFmtSize, sizeof(dwFmtSize), 1, mHandle) );
WAVEFORMATEX format;
format.wFormatTag = WAVE_FORMAT_PCM;
checkWriteResult( fwrite(&format.wFormatTag, sizeof(format.wFormatTag), 1, mHandle) );
format.nChannels = mChannels;
checkWriteResult( fwrite(&format.nChannels, sizeof(format.nChannels), 1, mHandle) );
format.nSamplesPerSec = mRate;
checkWriteResult( fwrite(&format.nSamplesPerSec, sizeof(format.nSamplesPerSec), 1, mHandle) );
format.nAvgBytesPerSec = mRate * 2 * mChannels;
checkWriteResult( fwrite(&format.nAvgBytesPerSec, sizeof(format.nAvgBytesPerSec), 1, mHandle) );
format.nBlockAlign = 2 * mChannels;
checkWriteResult( fwrite(&format.nBlockAlign, sizeof(format.nBlockAlign), 1, mHandle) );
format.wBitsPerSample = BITS_PER_CHANNEL;
checkWriteResult( fwrite(&format.wBitsPerSample, sizeof(format.wBitsPerSample), 1, mHandle) );
const char* data = "data";
checkWriteResult( fwrite(data, 4, 1, mHandle));
mFileName = filename;
mWritten = 0;
mLengthOffset = ftell(mHandle);
checkWriteResult( fwrite(&mWritten, 4, 1, mHandle) );
return isOpened();
}
void WavFileWriter::close()
{
Lock lock(mFileMtx);
if (mHandle)
{
fclose(mHandle);
mHandle = NULL;
}
}
// Appends PCM data and patches the RIFF and data-chunk length fields so
// the file stays valid after every call. Returns 'bytes' (0 when closed).
// NOTE(review): the data fwrite and RIFF-size fwrite results are not
// checked, unlike the data-length write - confirm intended behavior.
unsigned WavFileWriter::write(const void* buffer, unsigned bytes)
{
Lock l(mFileMtx);
if (!mHandle)
return 0;
// Seek the end of file
fseek(mHandle, 0, SEEK_END);
mWritten += bytes;
// Write the data
fwrite(buffer, bytes, 1, mHandle);
// Write file length (RIFF size = data size + 36 header bytes)
fseek(mHandle, 4, SEEK_SET);
unsigned int fl = mWritten + 36;
fwrite(&fl, sizeof(fl), 1, mHandle);
// Write data length
fseek(mHandle, mLengthOffset, SEEK_SET);
checkWriteResult( fwrite(&mWritten, 4, 1, mHandle) );
return bytes;
}
bool WavFileWriter::isOpened()
{
Lock lock(mFileMtx);
return (mHandle != 0);
}
std::tstring WavFileWriter::filename()
{
Lock lock(mFileMtx);
return mFileName;
}

View File

@@ -0,0 +1,82 @@
/* Copyright(C) 2007-2017 VoIPobjects (voipobjects.com)
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef __AUDIO_WAVFILE_H
#define __AUDIO_WAVFILE_H
#include "helper/HL_Sync.h"
#include "helper/HL_Types.h"
#include "Audio_Resampler.h"
#include <stdio.h>
#include <string>
#include <memory>
namespace Audio
{
class WavFileReader
{
protected:
FILE* mHandle;
short mChannels;
short mBits;
int mRate;
std::tstring mFileName;
mutable Mutex mFileMtx;
unsigned mDataOffset;
unsigned mDataLength;
Resampler mResampler;
std::string readChunk();
public:
WavFileReader();
~WavFileReader();
bool open(const std::tstring& filename);
void close();
bool isOpened();
void rewind();
int rate() const;
// This method returns number of read bytes
unsigned read(void* buffer, unsigned bytes);
// This method returns number of read samples
unsigned read(short* buffer, unsigned samples);
std::tstring filename() const;
unsigned size() const;
};
typedef std::shared_ptr<WavFileReader> PWavFileReader;
class WavFileWriter
{
protected:
FILE* mHandle; /// Handle of audio file.
std::tstring mFileName; /// Path to requested audio file.
Mutex mFileMtx; /// Mutex to protect this instance.
int mWritten; /// Amount of written data (in bytes)
int mLengthOffset; /// Position of length field.
int mRate, mChannels;
void checkWriteResult(int result);
public:
WavFileWriter();
~WavFileWriter();
bool open(const std::tstring& filename, int rate, int channels);
void close();
bool isOpened();
unsigned write(const void* buffer, unsigned bytes);
std::tstring filename();
};
typedef std::shared_ptr<WavFileWriter> PWavFileWriter;
}
#endif

View File

@@ -0,0 +1,555 @@
/* Copyright(C) 2007-2014 VoIP objects (voipobjects.com)
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifdef TARGET_WIN
#include "Audio_Wmme.h"
#include "Audio_Helper.h"
#include "../Helper/HL_Exception.h"
#include <process.h>
using namespace Audio;
// Allocates a movable global WAVEHDR + PCM data block for waveIn capture.
// Throws ERR_WMME_FAILED (with GetLastError) on allocation failure.
WmmeInputDevice::Buffer::Buffer()
{
// Do not use WAVEHDR allocated on stack!
mHeaderHandle = GlobalAlloc(GMEM_MOVEABLE | GMEM_SHARE, sizeof WAVEHDR);
if (!mHeaderHandle)
throw Exception(ERR_WMME_FAILED, GetLastError());
mHeader = (WAVEHDR*)GlobalLock(mHeaderHandle); // NOTE(review): lock result unchecked.
mDataHandle = GlobalAlloc(GMEM_MOVEABLE | GMEM_SHARE, AUDIO_MIC_BUFFER_SIZE);
if (!mDataHandle)
throw Exception(ERR_WMME_FAILED, GetLastError());
mData = GlobalLock(mDataHandle);
memset(mHeader, 0, sizeof *mHeader);
mHeader->dwBufferLength = AUDIO_MIC_BUFFER_SIZE;
mHeader->dwFlags = 0;
mHeader->lpData = (LPSTR)mData;
}
WmmeInputDevice::Buffer::~Buffer()
{
if (mDataHandle)
{
GlobalUnlock(mDataHandle);
GlobalFree(mDataHandle);
}
if (mHeaderHandle)
{
GlobalUnlock(mHeaderHandle);
GlobalFree(mHeaderHandle);
}
}
bool WmmeInputDevice::Buffer::prepare(HWAVEIN device)
{
MMRESULT resCode = MMSYSERR_NOERROR;
mHeader->dwFlags = 0;
mHeader->dwBufferLength = AUDIO_MIC_BUFFER_SIZE;
mHeader->lpData = (LPSTR)mData;
resCode = waveInPrepareHeader(device, mHeader, sizeof *mHeader);
//if (resCode != MMSYSERR_NOERROR)
// LogCritical("Audio", << "Failed to prepare source header. Error code " << resCode << ".");
return resCode == MMSYSERR_NOERROR;
}
bool WmmeInputDevice::Buffer::unprepare(HWAVEIN device)
{
if (mHeader->dwFlags & WHDR_PREPARED)
{
MMRESULT resCode = waveInUnprepareHeader(device, mHeader, sizeof *mHeader);
//if (resCode != MMSYSERR_NOERROR)
// LogCritical("Audio", << "Failed to unprepare source header. Error code " << resCode << ".");
return resCode == MMSYSERR_NOERROR;
}
return true;
}
bool WmmeInputDevice::Buffer::isFinished()
{
return (mHeader->dwFlags & WHDR_DONE) != 0;
}
bool WmmeInputDevice::Buffer::addToDevice(HWAVEIN device)
{
    // Queue the buffer so the driver can fill it with captured audio.
    return ::waveInAddBuffer(device, mHeader, sizeof(WAVEHDR)) == MMSYSERR_NOERROR;
}
void* WmmeInputDevice::Buffer::data()
{
    // Locked pointer to the PCM payload (valid for the Buffer's lifetime).
    return mData;
}
WmmeInputDevice::WmmeInputDevice(int deviceId)
    :mDevHandle(NULL), mDoneSignal(INVALID_HANDLE_VALUE), mFakeMode(false),
     mBufferIndex(0), mDeviceIndex(deviceId), mThreadHandle(0), mRefCount(0)
{
    // Two auto-reset events: mDoneSignal wakes the capture thread when a
    // buffer is full, mShutdownSignal asks it to terminate.
    mDoneSignal = ::CreateEvent(NULL, FALSE, FALSE, NULL);
    mShutdownSignal = ::CreateEvent(NULL, FALSE, FALSE, NULL);
}
WmmeInputDevice::~WmmeInputDevice()
{
    // Stop the capture thread (if running), then dispose of both events.
    close();
    ::CloseHandle(mDoneSignal);
    ::CloseHandle(mShutdownSignal);
}
bool WmmeInputDevice::fakeMode()
{
    // True when the capture device could not be opened (or a buffer could
    // not be re-queued) and the object operates silently.
    return mFakeMode;
}
void CALLBACK WmmeInputDevice::callbackProc(HWAVEIN hwi, UINT uMsg, DWORD_PTR dwInstance, DWORD_PTR dwParam1, DWORD_PTR dwParam2)
{
    // WMME driver callback - must stay minimal (no waveIn* calls allowed
    // from here). Only WIM_DATA matters: it tells the worker thread that
    // one capture buffer has been filled.
    if (uMsg == WIM_DATA)
    {
        WmmeInputDevice* self = reinterpret_cast<WmmeInputDevice*>(dwInstance);
        ::SetEvent(self->mDoneSignal);
    }
    // WIM_OPEN and WIM_CLOSE are intentionally ignored.
}
void WmmeInputDevice::openDevice()
{
    // Opens the waveIn device at the library-wide PCM format and starts
    // capture into the pre-allocated buffer ring. On open failure the object
    // switches to fake (silent) mode instead of throwing.
    // Runs on the capture thread.
    // Build WAVEFORMATEX structure
    WAVEFORMATEX wfx;
    memset(&wfx, 0, sizeof(wfx));
    wfx.wFormatTag = WAVE_FORMAT_PCM;
    wfx.nChannels = AUDIO_CHANNELS;
    wfx.nSamplesPerSec = AUDIO_SAMPLERATE;
    wfx.wBitsPerSample = 16;
    wfx.cbSize = 0;
    wfx.nBlockAlign = wfx.wBitsPerSample * wfx.nChannels / 8;
    wfx.nAvgBytesPerSec = wfx.nBlockAlign * wfx.nSamplesPerSec;
    // Open wavein
    MMRESULT mmres = waveInOpen(&mDevHandle, mDeviceIndex, &wfx, (DWORD_PTR)callbackProc, (DWORD_PTR)this, CALLBACK_FUNCTION);
    if (mmres != MMSYSERR_NOERROR)
    {
        mFakeMode = true;
        return;
    }
    else
        mFakeMode = false;
    // Create the buffers for running: prepare the whole ring, then queue it.
    mBufferIndex = 0;
    for (int i=0; i<AUDIO_MIC_BUFFER_COUNT; i++)
        mBufferList[i].prepare(mDevHandle);
    for (int i=0; i<AUDIO_MIC_BUFFER_COUNT; i++)
        mBufferList[i].addToDevice(mDevHandle);
    // NOTE(review): waveInStart's result is deliberately ignored here.
    /*mmres = */waveInStart(mDevHandle);
}
bool WmmeInputDevice::open()
{
    // Reference-counted open: only the first call spawns the capture
    // thread; subsequent calls just bump the counter.
    // Returns false if the thread could not be created.
    Lock lock(mGuard);
    mRefCount++;
    if (mRefCount > 1)
        return true;
    mThreadHandle = (HANDLE)_beginthread(&threadProc, 0, this);
    if (mThreadHandle == (HANDLE)-1L)
    {
        // _beginthread signals failure with -1; roll back the ref count
        // so a later open() can retry.
        mThreadHandle = 0;
        mRefCount--;
        return false;
    }
    // NOTE(review): a handle returned by _beginthread closes itself when the
    // thread exits, so close()'s WaitForSingleObject on it can race the
    // handle's destruction - consider _beginthreadex. TODO confirm.
    return true;
}
void WmmeInputDevice::closeDevice()
{
    // Stops capture, releases every queued buffer and closes the waveIn
    // handle. Runs on the capture thread just before it exits.
    // (Removed an unused local MMRESULT from the original.)
    // Stop device: waveInReset returns all queued buffers marked done.
    if (mDevHandle)
    {
        waveInReset(mDevHandle);
        waveInStop(mDevHandle);
    }
    // Close buffers. unprepare() is a no-op for never-prepared headers,
    // so this is safe even when the device opened in fake mode.
    for (int i=0; i<AUDIO_MIC_BUFFER_COUNT; i++)
        mBufferList[i].unprepare(mDevHandle);
    // Close device
    if (mDevHandle)
    {
        waveInClose(mDevHandle);
        mDevHandle = NULL;
    }
}
void WmmeInputDevice::close()
{
    // Reference-counted close: the capture thread is stopped only when the
    // last user releases the device.
    // FIX: the counter previously went negative on unbalanced close() calls,
    // after which the device could never be shut down.
    Lock l(mGuard);
    if (mRefCount > 0)
        mRefCount--;
    if (mRefCount != 0)
        return;
    // Set shutdown signal and join the capture thread.
    if (!mThreadHandle)
        return;
    ::SetEvent(mShutdownSignal);
    ::WaitForSingleObject(mThreadHandle, INFINITE);
    mThreadHandle = 0;
}
bool WmmeInputDevice::tryReadBuffer(void* buffer)
{
    // Attempts to consume the oldest capture buffer into 'buffer'
    // (AUDIO_MIC_BUFFER_SIZE bytes). Returns false when the driver has not
    // finished filling it yet. (Removed a dead empty else branch.)
    Buffer& devBuffer = mBufferList[mBufferIndex];
    if (!devBuffer.isFinished())
        return false;
    memcpy(buffer, devBuffer.data(), AUDIO_MIC_BUFFER_SIZE);
    // Recycle the buffer: unprepare, re-prepare and queue it again.
    devBuffer.unprepare(mDevHandle);
    devBuffer.prepare(mDevHandle);
    // If the buffer cannot be re-queued the stream is broken - fall back
    // to fake mode so readBuffer() stops blocking.
    if (!devBuffer.addToDevice(mDevHandle))
        setFakeMode(true);
    mBufferIndex = (mBufferIndex + 1) % AUDIO_MIC_BUFFER_COUNT;
    return true;
}
void WmmeInputDevice::setFakeMode(bool fakeMode)
{
    // Switches the device into (or out of) silent "fake" operation.
    mFakeMode = fakeMode;
}
int WmmeInputDevice::readBuffer(void* buffer)
{
    // Blocking read of one AUDIO_MIC_BUFFER_SIZE block into 'buffer'.
    // Returns the block size on success, or 0 when the device is not open
    // or runs in fake mode. Called from the capture thread.
    //Lock lock(mGuard);
    if (mRefCount <= 0 || mFakeMode)
        return 0;
    // Check for finished buffer.
    // NOTE(review): this loop never gives up - if the driver stops
    // delivering buffers and fake mode is not triggered, it waits forever.
    while (!tryReadBuffer(buffer))
        WaitForSingleObject(mDoneSignal, 50);
    return AUDIO_MIC_BUFFER_SIZE;
}
HWAVEIN WmmeInputDevice::handle()
{
    // Thread-safe accessor for the raw waveIn handle (NULL when closed or
    // in fake mode).
    Lock lock(mGuard);
    return mDevHandle;
}
void WmmeInputDevice::threadProc(void* arg)
{
    // Capture thread entry point: opens the device, pumps filled buffers to
    // the connection until the shutdown event fires, then closes the device.
    // (Removed an unused local 'waitResult' from the original.)
    WmmeInputDevice* impl = (WmmeInputDevice*)arg;
    impl->openDevice();
    // Stack scratch buffer for one capture block; released on return.
    void* buffer = _alloca(AUDIO_MIC_BUFFER_SIZE);
    // Index 0 = data ready, index 1 = shutdown request.
    HANDLE waitArray[2] = {impl->mDoneSignal, impl->mShutdownSignal};
    DWORD wr;
    do
    {
        wr = ::WaitForMultipleObjects(2, waitArray, FALSE, INFINITE);
        if (wr == WAIT_OBJECT_0)
        {
            impl->readBuffer(buffer);
            if (impl->connection())
                impl->connection()->onMicData(Format(), buffer, AUDIO_MIC_BUFFER_SIZE);
        }
    } while (wr == WAIT_OBJECT_0);
    impl->closeDevice();
}
// --- WmmeOutputDevice ---
WmmeOutputDevice::Buffer::Buffer()
    :mHeaderHandle(NULL), mDataHandle(NULL), mData(NULL), mHeader(NULL)
{
    // Locked global allocations keep stable addresses while queued in WMME.
    // FIX: the header block was previously allocated with
    // AUDIO_SPK_BUFFER_SIZE; it only needs to hold one WAVEHDR.
    mHeaderHandle = GlobalAlloc(GMEM_MOVEABLE | GMEM_SHARE, sizeof WAVEHDR);
    if (!mHeaderHandle)
        throw Exception(ERR_NOMEM);
    mDataHandle = GlobalAlloc(GMEM_MOVEABLE | GMEM_SHARE, AUDIO_SPK_BUFFER_SIZE);
    if (!mDataHandle)
    {
        // The destructor does not run for a throwing constructor;
        // free the header block here to avoid a leak.
        GlobalFree(mHeaderHandle);
        mHeaderHandle = NULL;
        throw Exception(ERR_NOMEM);
    }
    mHeader = (WAVEHDR*)GlobalLock(mHeaderHandle);
    mData = GlobalLock(mDataHandle);
    // Describe the playback payload in the header.
    memset(mHeader, 0, sizeof *mHeader);
    mHeader->dwBufferLength = AUDIO_SPK_BUFFER_SIZE;
    mHeader->lpData = (LPSTR)mData;
}
WmmeOutputDevice::Buffer::~Buffer()
{
    // Release the WAVEHDR block first, then the PCM payload block.
    if (mHeaderHandle != NULL)
    {
        ::GlobalUnlock(mHeaderHandle);
        ::GlobalFree(mHeaderHandle);
    }
    if (mDataHandle != NULL)
    {
        ::GlobalUnlock(mDataHandle);
        ::GlobalFree(mDataHandle);
    }
}
bool WmmeOutputDevice::Buffer::prepare(HWAVEOUT device)
{
    // Register the header with the playback device.
    return ::waveOutPrepareHeader(device, mHeader, sizeof(WAVEHDR)) == MMSYSERR_NOERROR;
}
bool WmmeOutputDevice::Buffer::unprepare(HWAVEOUT device)
{
    // Mirror the input-side logic: unpreparing a header that was never
    // prepared is a successful no-op instead of an MMSYSERR error.
    if (!(mHeader->dwFlags & WHDR_PREPARED))
        return true;
    MMRESULT result = ::waveOutUnprepareHeader(device, mHeader, sizeof *mHeader);
    return result == MMSYSERR_NOERROR;
}
bool WmmeOutputDevice::Buffer::write(HWAVEOUT device)
{
    // Queue the buffer's payload for playback.
    return ::waveOutWrite(device, mHeader, sizeof(WAVEHDR)) == MMSYSERR_NOERROR;
}
WmmeOutputDevice::WmmeOutputDevice(int index)
    :mDevice(NULL), mDeviceIndex(index), mPlayedTime(0), mPlayedCount(0), mBufferIndex(0), mThreadHandle(NULL),
     mFailed(false), mShutdownMarker(false), mClosing(false)
{
    // FIX: mClosing was left uninitialized until openDevice() ran, so
    // closing() could report garbage before the first open().
    // Two auto-reset events: one per played buffer, one for shutdown.
    mDoneSignal = ::CreateEvent(NULL, FALSE, FALSE, NULL);
    mShutdownSignal = ::CreateEvent(NULL, FALSE, FALSE, NULL);
}
WmmeOutputDevice::~WmmeOutputDevice()
{
    // Stop the playback thread first, then release both event handles.
    close();
    ::CloseHandle(mDoneSignal);
    ::CloseHandle(mShutdownSignal);
}
bool WmmeOutputDevice::open()
{
// Start thread
mThreadHandle = (HANDLE)_beginthread(&threadProc, 0, this);
return true;
}
void WmmeOutputDevice::close()
{
    // Asks the playback thread to stop and waits for it to terminate.
    // Tell the thread to exit. The thread polls mShutdownMarker; the event
    // is set as well, although the playback loop waits only on mDoneSignal.
    SetEvent(mShutdownSignal);
    mShutdownMarker = true;
    // Wait for thread.
    // NOTE(review): handles returned by _beginthread close themselves when
    // the thread exits, so this wait can race the handle's destruction -
    // TODO confirm / consider _beginthreadex.
    if (mThreadHandle)
        WaitForSingleObject(mThreadHandle, INFINITE);
    mThreadHandle = 0;
}
void WmmeOutputDevice::openDevice()
{
mClosing = false;
MMRESULT mmres = 0;
WAVEFORMATEX wfx;
memset(&wfx, 0, sizeof(wfx));
wfx.wFormatTag = 0x0001;
wfx.nChannels = AUDIO_CHANNELS;
wfx.nSamplesPerSec = AUDIO_SAMPLERATE;
wfx.wBitsPerSample = 16;
wfx.cbSize = 0;
wfx.nBlockAlign = wfx.wBitsPerSample * wfx.nChannels / 8;
wfx.nAvgBytesPerSec = wfx.nBlockAlign * wfx.nSamplesPerSec;
mmres = waveOutOpen(&mDevice, mDeviceIndex, &wfx, (DWORD_PTR)&callbackProc, (DWORD_PTR)this, CALLBACK_FUNCTION);
if (mmres != MMSYSERR_NOERROR)
throw Exception(ERR_WMME_FAILED, mmres);
// Prebuffer silence
for (unsigned i=0; i<AUDIO_SPK_BUFFER_COUNT; i++)
{
//bool dumb = false;
//mCallback(mBufferList[i].mData, SPK_BUFFER_SIZE, dumb, dumb);
memset(mBufferList[i].mData, 0, AUDIO_SPK_BUFFER_SIZE);
mBufferList[i].prepare(mDevice);
mBufferList[i].write(mDevice);
}
}
void WmmeOutputDevice::closeDevice()
{
    // Waits until every queued buffer has been played, then resets and
    // closes the waveOut handle. Runs on the playback thread.
    // NOTE(review): areBuffersFinished() locks mGuard again while it is
    // held here - assumes Mutex is recursive; confirm against HL_Sync.
    // NOTE(review): the drain loop below is unbounded; if the driver never
    // marks buffers done this loop will not terminate.
    Lock l(mGuard);
    mClosing = true;
    bool finished = false;
    while (!finished)
    {
        WaitForSingleObject(mDoneSignal, 10);
        finished = areBuffersFinished();
    }
    if (mDevice)
    {
        waveOutReset(mDevice);
        waveOutClose(mDevice);
    }
    mDevice = NULL;
}
bool WmmeOutputDevice::areBuffersFinished()
{
Lock l(mGuard);
bool result = true;
for (unsigned i=0; i<AUDIO_SPK_BUFFER_COUNT && result; i++)
{
bool finished = mBufferList[i].mHeader->dwFlags & WHDR_DONE ||
!mBufferList[i].mHeader->dwFlags;
if (finished)
{
/* if (mBufferList[i].mHeader->dwFlags & WHDR_PREPARED)
mBufferList[i].Unprepare(mDevice); */
}
result &= finished;
}
return result;
}
void WmmeOutputDevice::threadProc(void* arg)
{
    // Playback thread entry point: opens the device, then keeps refilling
    // buffers the driver has finished with. Once shutdown is requested the
    // loop stops refilling and counts retired buffers (exitCount) until the
    // whole ring has drained, then closes the device.
    // (Removed unused locals 'waitResult', 'waitArray' and 'useAEC'.)
    WmmeOutputDevice* impl = (WmmeOutputDevice*)arg;
    impl->openDevice();
    unsigned index, i;
    unsigned exitCount = 0;   // buffers retired after shutdown began
    bool exitSignal = false;
    do
    {
        // Poll for the exit request set by close().
        if (!exitSignal)
            exitSignal = impl->mShutdownMarker;
        // Wait for a played buffer; the timeout keeps shutdown polling alive.
        WaitForSingleObject(impl->mDoneSignal, 500);
        // Walk the ring from the cursor, handling every buffer marked done.
        for (i=0; i<AUDIO_SPK_BUFFER_COUNT; i++)
        {
            index = (impl->mBufferIndex + i) % AUDIO_SPK_BUFFER_COUNT;
            Buffer& buffer = impl->mBufferList[index];
            if (!(buffer.mHeader->dwFlags & WHDR_DONE))
                break;
            buffer.unprepare(impl->mDevice);
            if (!exitSignal)
            {
                // Pull the next block from the connection, or play silence.
                if (impl->connection())
                    impl->connection()->onSpkData(Format(), buffer.mData, AUDIO_SPK_BUFFER_SIZE);
                else
                    memset(buffer.mData, 0, AUDIO_SPK_BUFFER_SIZE);
                buffer.prepare(impl->mDevice);
                buffer.write(impl->mDevice);
            }
            else
                exitCount++;
        }
        impl->mBufferIndex = (impl->mBufferIndex + i) % AUDIO_SPK_BUFFER_COUNT;
    }
    while (!exitSignal || exitCount < AUDIO_SPK_BUFFER_COUNT);
    impl->closeDevice();
}
HWAVEOUT WmmeOutputDevice::handle()
{
    // Raw waveOut handle (NULL when the device is closed).
    return mDevice;
}
unsigned WmmeOutputDevice::playedTime()
{
    // Returns the amount of audio played since the previous call, in
    // milliseconds (the /8 implies 8 samples per ms, i.e. an 8 kHz stream -
    // TODO confirm against AUDIO_SAMPLERATE).
    // Returns 0 (and closes the device) when the position query fails.
    if (!mDevice)
        return 0;
    unsigned result = 0;
    MMTIME mmt;
    memset(&mmt, 0, sizeof(mmt));
    mmt.wType = TIME_SAMPLES;
    MMRESULT rescode = waveOutGetPosition(mDevice, &mmt, sizeof(mmt));
    if (rescode != MMSYSERR_NOERROR || mmt.wType != TIME_SAMPLES)
        closeDevice();
    else
    {
        // FIX: with TIME_SAMPLES the position lives in u.sample, not u.ms.
        // Both are DWORD members of the same union, so the old code read the
        // right bits by accident; u.sample states the intent.
        if (mmt.u.sample < mPlayedTime)
            result = 0;
        else
        {
            result = mmt.u.sample - mPlayedTime;
            // Keep the sub-millisecond remainder (result % 8 samples) for
            // the next call so rounding error does not accumulate.
            mPlayedTime = mmt.u.sample - result % 8;
        }
    }
    return result / 8;
}
void WmmeOutputDevice::setFakeMode(bool fakemode)
{
    // NOTE(review): the parameter is ignored and the device is closed
    // unconditionally; mFailed (returned by fakeMode()) is never updated
    // here - inconsistent with WmmeInputDevice::setFakeMode, confirm intent.
    closeDevice();
}
bool WmmeOutputDevice::fakeMode()
{
    // Reports the failure flag. NOTE(review): nothing in this file ever
    // sets mFailed to true - confirm whether fake mode is implemented.
    return mFailed;
}
bool WmmeOutputDevice::closing()
{
    // True while closeDevice() is draining the remaining buffers.
    return mClosing;
}
void CALLBACK WmmeOutputDevice::callbackProc(HWAVEOUT hwo, UINT msg, DWORD_PTR dwInstance, DWORD_PTR dwParam1, DWORD_PTR dwParam2)
{
    // WMME driver callback - keep the work minimal (no waveOut* calls
    // allowed from here). WOM_DONE means one buffer finished playing:
    // bump the played counter and wake the playback thread.
    if (msg != WOM_DONE)
        return;
    WmmeOutputDevice* self = (WmmeOutputDevice*)dwInstance;
    InterlockedIncrement(&self->mPlayedCount);
    SetEvent(self->mDoneSignal);
}
#endif

View File

@@ -0,0 +1,148 @@
/* Copyright(C) 2007-2014 VoIP objects (voipobjects.com)
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef __AUDIO_WMME_H
#define __AUDIO_WMME_H
#ifdef TARGET_WIN
#include "../config.h"
#include <winsock2.h>
#include <windows.h>
#include <mmsystem.h>
#include "../Helper/HL_Sync.h"
#include "Audio_Interface.h"
#include <deque>
#include <EndpointVolume.h>
#include <MMDeviceAPI.h>
#if defined(_MSC_VER)
#include <Functiondiscoverykeys_devpkey.h>
#endif
#include <vector>
#include <string>
namespace Audio
{
/// WMME (waveIn) implementation of the audio capture device.
/// open()/close() are reference counted; capture runs on a dedicated
/// thread spawned by the first open().
class WmmeInputDevice: public InputDevice
{
public:
    WmmeInputDevice(int index);
    ~WmmeInputDevice();
    bool open();                     /// Ref-counted open; starts the capture thread on first call.
    void close();                    /// Ref-counted close; stops the thread on last release.
    bool fakeMode();                 /// True when the real device failed and silence is produced.
    void setFakeMode(bool fakeMode); /// Force fake (silent) mode on/off.
    int readBuffer(void* buffer);    /// Blocking read of one AUDIO_MIC_BUFFER_SIZE block; 0 if closed/fake.
    HWAVEIN handle();                /// Raw waveIn handle (NULL when closed or fake).
protected:
    /// One capture buffer: a WAVEHDR plus its PCM payload, both in locked
    /// global memory so their addresses stay stable while queued in WMME.
    class Buffer
    {
    public:
        Buffer();
        ~Buffer();
        bool prepare(HWAVEIN device);     /// waveInPrepareHeader wrapper; resets the header first.
        bool unprepare(HWAVEIN device);   /// waveInUnprepareHeader wrapper (no-op when not prepared).
        bool isFinished();                /// True when the driver set WHDR_DONE.
        bool addToDevice(HWAVEIN device); /// Queue the buffer for capture.
        void* data();                     /// Locked pointer to the PCM payload.
    protected:
        HGLOBAL mDataHandle;   /// Global handle of the PCM payload block.
        void* mData;           /// Locked payload pointer.
        HGLOBAL mHeaderHandle; /// Global handle of the WAVEHDR block.
        WAVEHDR* mHeader;      /// Locked WAVEHDR pointer.
    };
    Mutex mGuard; /// Mutex to protect this instance.
    HWAVEIN mDevHandle; /// Handle of opened capture device.
    HANDLE mThreadHandle;   /// Capture thread handle (from _beginthread).
    HANDLE mShutdownSignal; /// Event used to ask the capture thread to exit.
    HANDLE mDoneSignal; /// Event handle to signal about finished capture.
    Buffer mBufferList[AUDIO_MIC_BUFFER_COUNT]; /// Ring of capture buffers.
    unsigned mBufferIndex; /// Ring slot expected to finish next.
    int mDeviceIndex; /// Index of capture device.
    volatile bool mFakeMode; /// Marks if fake mode is active.
    int mRefCount; /// open()/close() balance.
    bool tryReadBuffer(void* buffer); /// Non-blocking attempt to consume one finished buffer.
    void openDevice();  /// Capture-thread side: open waveIn and queue the ring.
    void closeDevice(); /// Capture-thread side: stop, unprepare and close waveIn.
    static void CALLBACK callbackProc(HWAVEIN hwi, UINT uMsg, DWORD_PTR dwInstance, DWORD_PTR dwParam1, DWORD_PTR dwParam2); /// Driver callback; signals mDoneSignal on WIM_DATA.
    static void threadProc(void* arg); /// Capture thread entry point.
};
/// WMME (waveOut) implementation of the audio playback device.
/// open() spawns a playback thread that keeps a ring of buffers queued,
/// pulling data from the connection or playing silence.
class WmmeOutputDevice: public OutputDevice
{
public:
    WmmeOutputDevice(int index);
    ~WmmeOutputDevice();
    bool open();            /// Starts the playback thread.
    void close();           /// Signals the thread to stop and joins it.
    HWAVEOUT handle();      /// Raw waveOut handle (NULL when closed).
    unsigned playedTime();  /// Amount of audio played since the previous call.
    void setFakeMode(bool fakemode); /// See implementation note - currently just closes the device.
    bool fakeMode();        /// Reports the failure flag.
    bool closing();         /// True while closeDevice() drains queued buffers.
protected:
    /// One playback buffer: a WAVEHDR plus its PCM payload, both in locked
    /// global memory so their addresses stay stable while queued in WMME.
    class Buffer
    {
        friend class WmmeOutputDevice;
    public:
        Buffer();
        ~Buffer();
        bool prepare(HWAVEOUT device);   /// waveOutPrepareHeader wrapper.
        bool unprepare(HWAVEOUT device); /// waveOutUnprepareHeader wrapper.
        bool write(HWAVEOUT device);     /// Queue the payload for playback.
    protected:
        WAVEHDR* mHeader;      /// Locked WAVEHDR pointer.
        void* mData;           /// Locked payload pointer.
        HGLOBAL mHeaderHandle; /// Global handle of the WAVEHDR block.
        HGLOBAL mDataHandle;   /// Global handle of the PCM payload block.
    };
    Mutex mGuard; /// Mutex to protect this instance
    int mDeviceIndex; /// Index of the playback device.
    HWAVEOUT mDevice; /// Handle of opened audio device
    Buffer mBufferList[AUDIO_SPK_BUFFER_COUNT]; /// Ring of playback buffers.
    unsigned mPlayedTime; /// Last consumed playback position (sample units per playedTime(); original comment said milliseconds - confirm).
    bool mClosing; /// Set while closeDevice() waits for buffers to drain.
    HANDLE mDoneSignal,  /// Signaled by the driver callback per played buffer.
    mShutdownSignal,     /// Signaled by close(); the thread mainly polls mShutdownMarker.
    mThreadHandle;       /// Playback thread handle (from _beginthread).
    volatile bool mShutdownMarker; /// Polled by the playback thread to exit.
    volatile LONG mPlayedCount; /// Count of buffers the driver finished playing.
    unsigned mBufferIndex; /// Ring cursor of the buffer expected to finish next.
    bool mFailed; /// Failure flag reported by fakeMode().
    void openDevice();  /// Playback-thread side: open waveOut and prebuffer silence.
    void closeDevice(); /// Playback-thread side: drain, reset and close waveOut.
    bool areBuffersFinished(); /// True when no buffer is still queued in the driver.
    static void CALLBACK callbackProc(HWAVEOUT hwo, UINT msg, DWORD_PTR dwInstance, DWORD_PTR dwParam1, DWORD_PTR dwParam2); /// Driver callback; signals mDoneSignal on WOM_DONE.
    static void threadProc(void* arg); /// Playback thread entry point.
};
}
#endif
#endif

View File

@@ -0,0 +1,2 @@
#include "Audio_iOS.h"

View File

@@ -0,0 +1,38 @@
#ifndef __AUDIO_IOS
#define __AUDIO_IOS
/// iOS audio capture device (declaration only; implementation lives in
/// Audio_iOS.cpp, which is currently just a stub).
class IosInputDevice: public InputDevice
{
protected:
public:
    IosInputDevice();
    ~IosInputDevice();
    void open();  /// Open the capture path.
    void close(); /// Close the capture path.
};
/// iOS audio playback device (declaration only; implementation lives in
/// Audio_iOS.cpp, which is currently just a stub).
class IosOutputDevice: public OutputDevice
{
protected:
public:
    IosOutputDevice();
    ~IosOutputDevice();
    /// Audio output routing destinations.
    enum
    {
        Receiver,
        Speaker,
        Bluetooth
    };
    int route();              /// Current route (one of the enum values above).
    void setRoute(int route); /// Select the output route.
    void open();
    void close();
};
#endif

View File

@@ -0,0 +1,22 @@
# Build script for the audio support library.
project (audio_lib)
# Rely on C++ 11
set (CMAKE_CXX_STANDARD 11)
set (CMAKE_CXX_STANDARD_REQUIRED ON)
# Platform-specific backends (CoreAudio, DirectSound, Null, ...) are listed
# unconditionally; each source guards itself with TARGET_* preprocessor checks.
set (AUDIOLIB_SOURCES
Audio_Resampler.cpp
Audio_Quality.cpp
Audio_Mixer.cpp
Audio_Interface.cpp
Audio_Helper.cpp
Audio_DataWindow.cpp
Audio_DevicePair.cpp
Audio_Player.cpp
Audio_Null.cpp
Audio_CoreAudio.cpp
Audio_DirectSound.cpp
Audio_WavFile.cpp
)
# Static library target consumed by the rest of the project.
add_library(audio_lib ${AUDIOLIB_SOURCES})