[haiku-commits] r34482 - in haiku/trunk: headers/os/media src/kits/media

  • From: axeld@xxxxxxxxxxxxxxxx
  • To: haiku-commits@xxxxxxxxxxxxx
  • Date: Thu, 3 Dec 2009 23:16:54 +0100 (CET)

Author: axeld
Date: 2009-12-03 23:16:54 +0100 (Thu, 03 Dec 2009)
New Revision: 34482
Changeset: http://dev.haiku-os.org/changeset/34482/haiku

Modified:
   haiku/trunk/headers/os/media/SoundPlayer.h
   haiku/trunk/src/kits/media/SoundPlayNode.cpp
   haiku/trunk/src/kits/media/SoundPlayNode.h
   haiku/trunk/src/kits/media/SoundPlayer.cpp
Log:
* Moved _SoundPlayNode into the BPrivate namespace and dropped the '_' prefix.
* Further cleanup.
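
The change to SoundPlayer.h below follows the usual private implementation
pattern: the public header only forward-declares the node class inside the
BPrivate namespace and keeps an opaque pointer to it, so the class never
becomes part of the public API. A condensed sketch of that pattern (it mirrors
the diff that follows and is not a complete header):

	namespace BPrivate {
		class SoundPlayNode;
			// defined only in src/kits/media, never exposed to API users
	}

	class BSoundPlayer {
		// ... public API unchanged ...
	private:
		friend class BPrivate::SoundPlayNode;

		BPrivate::SoundPlayNode*	fPlayerNode;
			// opaque pointer; code outside the kit cannot touch it
	};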


Modified: haiku/trunk/headers/os/media/SoundPlayer.h
===================================================================
--- haiku/trunk/headers/os/media/SoundPlayer.h  2009-12-03 21:59:08 UTC (rev 34481)
+++ haiku/trunk/headers/os/media/SoundPlayer.h  2009-12-03 22:16:54 UTC (rev 34482)
@@ -16,7 +16,9 @@
 class BContinuousParameter;
 class BParameterWeb;
 class BSound;
-class _SoundPlayNode;
+namespace BPrivate {
+       class SoundPlayNode;
+}
 
 
 class sound_error : public std::exception {
@@ -155,10 +157,10 @@
                                                                        const media_raw_audio_format& format);
 
 private:
-       friend class _SoundPlayNode;
+       friend class BPrivate::SoundPlayNode;
 
-       struct _playing_sound {
-               _playing_sound* next;
+       struct playing_sound {
+               playing_sound*  next;
                off_t                   current_offset;
                BSound*                 sound;
                play_id                 id;
@@ -168,8 +170,8 @@
                float                   volume;
        };
 
-       struct _waiting_sound {
-               _waiting_sound* next;
+       struct waiting_sound {
+               waiting_sound*  next;
                bigtime_t               start_time;
                BSound*                 sound;
                play_id                 id;
@@ -177,10 +179,10 @@
                float                   volume;
        };
 
-                       _SoundPlayNode*         fPlayerNode;
+                       BPrivate::SoundPlayNode* fPlayerNode;
 
-                       _playing_sound*         fPlayingSounds;
-                       _waiting_sound*         fWaitingSounds;
+                       playing_sound*          fPlayingSounds;
+                       waiting_sound*          fWaitingSounds;
 
                        BufferPlayerFunc        fPlayBufferFunc;
                        EventNotifierFunc       fNotifierFunc;

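The renamed playing_sound and waiting_sound structures above are intrusive
singly linked lists chained through their next pointers; the code that walks
and updates them lives in SoundPlayer.cpp and is not part of this excerpt. As
a purely hypothetical, self-contained sketch (the struct is trimmed and the
helper name is illustrative only), adding an entry is a constant-time prepend:

	struct playing_sound {
		playing_sound*	next;
		// ... current_offset, sound, id, delta, rate, volume as declared
		// in SoundPlayer.h ...
	};

	// The new node simply becomes the list head; O(1), no traversal needed.
	static void
	prepend_playing_sound(playing_sound*& head, playing_sound* entry)
	{
		entry->next = head;
		head = entry;
	}
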
Modified: haiku/trunk/src/kits/media/SoundPlayNode.cpp
===================================================================
--- haiku/trunk/src/kits/media/SoundPlayNode.cpp        2009-12-03 21:59:08 UTC (rev 34481)
+++ haiku/trunk/src/kits/media/SoundPlayNode.cpp        2009-12-03 22:16:54 UTC (rev 34482)
@@ -8,9 +8,7 @@
  */
 
 
-/*!    This is the BBufferProducer, used internally by BSoundPlayer
-       This belongs into a private namespace, but isn't for
-       compatibility reasons.
+/*!    This is the BBufferProducer used internally by BSoundPlayer.
 */
 
 
@@ -28,25 +26,28 @@
 #define SEND_NEW_BUFFER_EVENT (BTimedEventQueue::B_USER_EVENT + 1)
 
 
-_SoundPlayNode::_SoundPlayNode(const char* name, BSoundPlayer* player)
+namespace BPrivate {
+
+
+SoundPlayNode::SoundPlayNode(const char* name, BSoundPlayer* player)
        :
        BMediaNode(name),
        BBufferProducer(B_MEDIA_RAW_AUDIO),
        BMediaEventLooper(),
-       mPlayer(player),
-       mInitCheckStatus(B_OK),
-       mOutputEnabled(true),
-       mBufferGroup(NULL),
-       mFramesSent(0),
-       mTooEarlyCount(0)
+       fPlayer(player),
+       fInitStatus(B_OK),
+       fOutputEnabled(true),
+       fBufferGroup(NULL),
+       fFramesSent(0),
+       fTooEarlyCount(0)
 {
        CALLED();
-       mOutput.format.type = B_MEDIA_RAW_AUDIO;
-       mOutput.format.u.raw_audio = media_multi_audio_format::wildcard;
+       fOutput.format.type = B_MEDIA_RAW_AUDIO;
+       fOutput.format.u.raw_audio = media_multi_audio_format::wildcard;
 }
 
 
-_SoundPlayNode::~_SoundPlayNode()
+SoundPlayNode::~SoundPlayNode()
 {
        CALLED();
        Quit();
@@ -54,25 +55,25 @@
 
 
 bool
-_SoundPlayNode::IsPlaying()
+SoundPlayNode::IsPlaying()
 {
        return RunState() == B_STARTED;
 }
 
 
 bigtime_t
-_SoundPlayNode::CurrentTime()
+SoundPlayNode::CurrentTime()
 {
-       int frameRate = (int)mOutput.format.u.raw_audio.frame_rate;
+       int frameRate = (int)fOutput.format.u.raw_audio.frame_rate;
        return frameRate == 0 ? 0
-               : bigtime_t((1000000LL * mFramesSent) / frameRate);
+               : bigtime_t((1000000LL * fFramesSent) / frameRate);
 }
 
 
 media_multi_audio_format
-_SoundPlayNode::Format() const
+SoundPlayNode::Format() const
 {
-       return mOutput.format.u.raw_audio;
+       return fOutput.format.u.raw_audio;
 }
 
 
@@ -80,7 +81,7 @@
 
 
 BMediaAddOn*
-_SoundPlayNode::AddOn(int32* _internalID) const
+SoundPlayNode::AddOn(int32* _internalID) const
 {
        CALLED();
        // This only gets called if we are in an add-on.
@@ -89,7 +90,7 @@
 
 
 void
-_SoundPlayNode::Preroll()
+SoundPlayNode::Preroll()
 {
        CALLED();
        // TODO: Performance opportunity
@@ -98,7 +99,7 @@
 
 
 status_t
-_SoundPlayNode::HandleMessage(int32 message, const void* data, size_t size)
+SoundPlayNode::HandleMessage(int32 message, const void* data, size_t size)
 {
        CALLED();
        return B_ERROR;
@@ -106,31 +107,31 @@
 
 
 void
-_SoundPlayNode::NodeRegistered()
+SoundPlayNode::NodeRegistered()
 {
        CALLED();
 
-       if (mInitCheckStatus != B_OK) {
+       if (fInitStatus != B_OK) {
                ReportError(B_NODE_IN_DISTRESS);
                return;
        }
 
        SetPriority(B_URGENT_PRIORITY);
 
-       mOutput.format.type = B_MEDIA_RAW_AUDIO;
-       mOutput.format.u.raw_audio = media_multi_audio_format::wildcard;
-       mOutput.destination = media_destination::null;
-       mOutput.source.port = ControlPort();
-       mOutput.source.id = 0;
-       mOutput.node = Node();
-       strcpy(mOutput.name, Name());
+       fOutput.format.type = B_MEDIA_RAW_AUDIO;
+       fOutput.format.u.raw_audio = media_multi_audio_format::wildcard;
+       fOutput.destination = media_destination::null;
+       fOutput.source.port = ControlPort();
+       fOutput.source.id = 0;
+       fOutput.node = Node();
+       strcpy(fOutput.name, Name());
 
        Run();
 }
 
 
 status_t
-_SoundPlayNode::RequestCompleted(const media_request_info& info)
+SoundPlayNode::RequestCompleted(const media_request_info& info)
 {
        CALLED();
        return B_OK;
@@ -138,7 +139,7 @@
 
 
 void
-_SoundPlayNode::SetTimeSource(BTimeSource* timeSource)
+SoundPlayNode::SetTimeSource(BTimeSource* timeSource)
 {
        CALLED();
        BMediaNode::SetTimeSource(timeSource);
@@ -146,9 +147,9 @@
 
 
 void
-_SoundPlayNode::SetRunMode(run_mode mode)
+SoundPlayNode::SetRunMode(run_mode mode)
 {
-       TRACE("_SoundPlayNode::SetRunMode mode:%i\n", mode);
+       TRACE("SoundPlayNode::SetRunMode mode:%i\n", mode);
        BMediaNode::SetRunMode(mode);
 }
 
@@ -157,7 +158,7 @@
 
 
 status_t
-_SoundPlayNode::FormatSuggestionRequested(media_type type, int32 /*quality*/,
+SoundPlayNode::FormatSuggestionRequested(media_type type, int32 /*quality*/,
        media_format* format)
 {
        // FormatSuggestionRequested() is not necessarily part of the format
@@ -179,7 +180,7 @@
 
 
 status_t
-_SoundPlayNode::FormatProposal(const media_source& output, media_format* format)
+SoundPlayNode::FormatProposal(const media_source& output, media_format* format)
 {
        // FormatProposal() is the first stage in the BMediaRoster::Connect()
        // process. We hand out a suggested format, with wildcards for any
@@ -187,8 +188,8 @@
        CALLED();
 
        // is this a proposal for our one output?
-       if (output != mOutput.source) {
-               TRACE("_SoundPlayNode::FormatProposal returning B_MEDIA_BAD_SOURCE\n");
+       if (output != fOutput.source) {
+               TRACE("SoundPlayNode::FormatProposal returning B_MEDIA_BAD_SOURCE\n");
                return B_MEDIA_BAD_SOURCE;
        }
 
@@ -198,14 +199,14 @@
 
        // if not raw audio, we can't support it
        if (format->type != B_MEDIA_RAW_AUDIO) {
-               TRACE("_SoundPlayNode::FormatProposal returning B_MEDIA_BAD_FORMAT\n");
+               TRACE("SoundPlayNode::FormatProposal returning B_MEDIA_BAD_FORMAT\n");
                return B_MEDIA_BAD_FORMAT;
        }
 
 #if DEBUG >0
        char buf[100];
        string_for_format(*format, buf, sizeof(buf));
-       TRACE("_SoundPlayNode::FormatProposal: format %s\n", buf);
+       TRACE("SoundPlayNode::FormatProposal: format %s\n", buf);
 #endif
 
        return B_OK;
@@ -213,7 +214,7 @@
 
 
 status_t
-_SoundPlayNode::FormatChangeRequested(const media_source& source,
+SoundPlayNode::FormatChangeRequested(const media_source& source,
        const media_destination& destination, media_format* _format,
        int32* /* deprecated */)
 {
@@ -225,12 +226,12 @@
 
 
 status_t
-_SoundPlayNode::GetNextOutput(int32* cookie, media_output* _output)
+SoundPlayNode::GetNextOutput(int32* cookie, media_output* _output)
 {
        CALLED();
 
        if (*cookie == 0) {
-               *_output = mOutput;
+               *_output = fOutput;
                *cookie += 1;
                return B_OK;
        } else {
@@ -240,7 +241,7 @@
 
 
 status_t
-_SoundPlayNode::DisposeOutputCookie(int32 cookie)
+SoundPlayNode::DisposeOutputCookie(int32 cookie)
 {
        CALLED();
        // do nothing because we don't use the cookie for anything special
@@ -249,19 +250,19 @@
 
 
 status_t
-_SoundPlayNode::SetBufferGroup(const media_source& forSource,
+SoundPlayNode::SetBufferGroup(const media_source& forSource,
        BBufferGroup* newGroup)
 {
        CALLED();
 
        // is this our output?
-       if (forSource != mOutput.source) {
-               TRACE("_SoundPlayNode::SetBufferGroup returning B_MEDIA_BAD_SOURCE\n");
+       if (forSource != fOutput.source) {
+               TRACE("SoundPlayNode::SetBufferGroup returning B_MEDIA_BAD_SOURCE\n");
                return B_MEDIA_BAD_SOURCE;
        }
 
        // Are we being passed the buffer group we're already using?
-       if (newGroup == mBufferGroup)
+       if (newGroup == fBufferGroup)
                return B_OK;
 
        // Ahh, someone wants us to use a different buffer group. At this point we
@@ -270,19 +271,19 @@
        // use *that*. Note that if we're caching a BBuffer that we requested
        // earlier, we have to Recycle() that buffer *before* deleting the buffer
        // group, otherwise we'll deadlock waiting for that buffer to be recycled!
-       delete mBufferGroup;
+       delete fBufferGroup;
                // waits for all buffers to recycle
        if (newGroup != NULL) {
                // we were given a valid group; just use that one from now on
-               mBufferGroup = newGroup;
+               fBufferGroup = newGroup;
        } else {
                // we were passed a NULL group pointer; that means we construct
                // our own buffer group to use from now on
-               size_t size = mOutput.format.u.raw_audio.buffer_size;
-               int32 count = int32(mLatency / BufferDuration() + 1 + 1);
+               size_t size = fOutput.format.u.raw_audio.buffer_size;
+               int32 count = int32(fLatency / BufferDuration() + 1 + 1);
                if (count < 3)
                        count = 3;
-               mBufferGroup = new BBufferGroup(size, count);
+               fBufferGroup = new BBufferGroup(size, count);
        }
 
        return B_OK;
@@ -290,7 +291,7 @@
 
 
 status_t
-_SoundPlayNode::GetLatency(bigtime_t* _latency)
+SoundPlayNode::GetLatency(bigtime_t* _latency)
 {
        CALLED();
 
@@ -301,7 +302,7 @@
 
 
 status_t
-_SoundPlayNode::PrepareToConnect(const media_source& what,
+SoundPlayNode::PrepareToConnect(const media_source& what,
        const media_destination& where, media_format* format,
        media_source* _source, char* _name)
 {
@@ -314,14 +315,14 @@
        CALLED();
 
        // is this our output?
-       if (what != mOutput.source)     {
-               TRACE("_SoundPlayNode::PrepareToConnect returning "
+       if (what != fOutput.source)     {
+               TRACE("SoundPlayNode::PrepareToConnect returning "
                        "B_MEDIA_BAD_SOURCE\n");
                return B_MEDIA_BAD_SOURCE;
        }
 
        // are we already connected?
-       if (mOutput.destination != media_destination::null)
+       if (fOutput.destination != media_destination::null)
                return B_MEDIA_ALREADY_CONNECTED;
 
        // the format may not yet be fully specialized (the consumer might have
@@ -331,13 +332,13 @@
 #if DEBUG > 0
        char buf[100];
        string_for_format(*format, buf, sizeof(buf));
-       TRACE("_SoundPlayNode::PrepareToConnect: input format %s\n", buf);
+       TRACE("SoundPlayNode::PrepareToConnect: input format %s\n", buf);
 #endif
 
        // if not raw audio, we can't support it
        if (format->type != B_MEDIA_UNKNOWN_TYPE
                && format->type != B_MEDIA_RAW_AUDIO) {
-               TRACE("_SoundPlayNode::PrepareToConnect: non raw format, returning "
+               TRACE("SoundPlayNode::PrepareToConnect: non raw format, returning "
                        "B_MEDIA_BAD_FORMAT\n");
                return B_MEDIA_BAD_FORMAT;
        }
@@ -354,7 +355,7 @@
                && *(uint32 *)&format->user_data[44] == FORMAT_USER_DATA_MAGIC_2) {
                channel_count = *(uint32 *)&format->user_data[4];
                frame_rate = *(float *)&format->user_data[20];
-               TRACE("_SoundPlayNode::PrepareToConnect: found mixer info: "
+               TRACE("SoundPlayNode::PrepareToConnect: found mixer info: "
                        "channel_count %ld, frame_rate %.1f\n", channel_count, 
frame_rate);
        }
 
@@ -377,28 +378,28 @@
 
 #if DEBUG > 0
        string_for_format(*format, buf, sizeof(buf));
-       TRACE("_SoundPlayNode::PrepareToConnect: output format %s\n", buf);
+       TRACE("SoundPlayNode::PrepareToConnect: output format %s\n", buf);
 #endif
 
        // Now reserve the connection, and return information about it
-       mOutput.destination = where;
-       mOutput.format = *format;
-       *_source = mOutput.source;
+       fOutput.destination = where;
+       fOutput.format = *format;
+       *_source = fOutput.source;
        strcpy(_name, Name());
        return B_OK;
 }
 
 
 void
-_SoundPlayNode::Connect(status_t error, const media_source& source,
+SoundPlayNode::Connect(status_t error, const media_source& source,
        const media_destination& destination, const media_format& format,
        char* name)
 {
        CALLED();
 
        // is this our output?
-       if (source != mOutput.source) {
-               TRACE("_SoundPlayNode::Connect returning\n");
+       if (source != fOutput.source) {
+               TRACE("SoundPlayNode::Connect returning\n");
                return;
        }
 
@@ -406,85 +407,85 @@
        // a non-zero error code.  When that happens we simply unreserve the
        // connection and do nothing else.
        if (error) {
-               mOutput.destination = media_destination::null;
-               mOutput.format.type = B_MEDIA_RAW_AUDIO;
-               mOutput.format.u.raw_audio = media_multi_audio_format::wildcard;
+               fOutput.destination = media_destination::null;
+               fOutput.format.type = B_MEDIA_RAW_AUDIO;
+               fOutput.format.u.raw_audio = media_multi_audio_format::wildcard;
                return;
        }
 
        // Okay, the connection has been confirmed.  Record the destination and
        // format that we agreed on, and report our connection name again.
-       mOutput.destination = destination;
-       mOutput.format = format;
+       fOutput.destination = destination;
+       fOutput.format = format;
        strcpy(name, Name());
 
        // Now that we're connected, we can determine our downstream latency.
        // Do so, then make sure we get our events early enough.
        media_node_id id;
-       FindLatencyFor(mOutput.destination, &mLatency, &id);
-       TRACE("_SoundPlayNode::Connect: downstream latency = %Ld\n", mLatency);
+       FindLatencyFor(fOutput.destination, &fLatency, &id);
+       TRACE("SoundPlayNode::Connect: downstream latency = %Ld\n", fLatency);
 
        // reset our buffer duration, etc. to avoid later calculations
-       bigtime_t duration = ((mOutput.format.u.raw_audio.buffer_size * 1000000LL)
-               / ((mOutput.format.u.raw_audio.format
+       bigtime_t duration = ((fOutput.format.u.raw_audio.buffer_size * 1000000LL)
+               / ((fOutput.format.u.raw_audio.format
                                & media_raw_audio_format::B_AUDIO_SIZE_MASK)
-                       * mOutput.format.u.raw_audio.channel_count))
-               / (int32)mOutput.format.u.raw_audio.frame_rate;
+                       * fOutput.format.u.raw_audio.channel_count))
+               / (int32)fOutput.format.u.raw_audio.frame_rate;
        SetBufferDuration(duration);
-       TRACE("_SoundPlayNode::Connect: buffer duration is %Ld\n", duration);
+       TRACE("SoundPlayNode::Connect: buffer duration is %Ld\n", duration);
 
-       mInternalLatency = (3 * BufferDuration()) / 4;
-       TRACE("_SoundPlayNode::Connect: using %Ld as internal latency\n",
-               mInternalLatency);
-       SetEventLatency(mLatency + mInternalLatency);
+       fInternalLatency = (3 * BufferDuration()) / 4;
+       TRACE("SoundPlayNode::Connect: using %Ld as internal latency\n",
+               fInternalLatency);
+       SetEventLatency(fLatency + fInternalLatency);
 
        // Set up the buffer group for our connection, as long as nobody handed us
        // a buffer group (via SetBufferGroup()) prior to this.
        // That can happen, for example, if the consumer calls SetOutputBuffersFor()
        // on us from within its Connected() method.
-       if (!mBufferGroup)
+       if (!fBufferGroup)
                AllocateBuffers();
 }
 
 
 void
-_SoundPlayNode::Disconnect(const media_source& what,
+SoundPlayNode::Disconnect(const media_source& what,
        const media_destination& where)
 {
        CALLED();
 
        // is this our output?
-       if (what != mOutput.source) {
-               TRACE("_SoundPlayNode::Disconnect returning\n");
+       if (what != fOutput.source) {
+               TRACE("SoundPlayNode::Disconnect returning\n");
                return;
        }
 
        // Make sure that our connection is the one being disconnected
-       if (where == mOutput.destination && what == mOutput.source) {
-               mOutput.destination = media_destination::null;
-               mOutput.format.type = B_MEDIA_RAW_AUDIO;
-               mOutput.format.u.raw_audio = media_multi_audio_format::wildcard;
-               delete mBufferGroup;
-               mBufferGroup = NULL;
+       if (where == fOutput.destination && what == fOutput.source) {
+               fOutput.destination = media_destination::null;
+               fOutput.format.type = B_MEDIA_RAW_AUDIO;
+               fOutput.format.u.raw_audio = media_multi_audio_format::wildcard;
+               delete fBufferGroup;
+               fBufferGroup = NULL;
        } else {
                fprintf(stderr, "\tDisconnect() called with wrong source/destination (%ld/%ld), ours is (%ld/%ld)\n",
-                       what.id, where.id, mOutput.source.id, mOutput.destination.id);
+                       what.id, where.id, fOutput.source.id, fOutput.destination.id);
        }
 }
 
 
 void
-_SoundPlayNode::LateNoticeReceived(const media_source& what, bigtime_t howMuch,
+SoundPlayNode::LateNoticeReceived(const media_source& what, bigtime_t howMuch,
        bigtime_t performanceTime)
 {
        CALLED();
 
-       TRACE("_SoundPlayNode::LateNoticeReceived, %Ld too late at %Ld\n", howMuch,
+       TRACE("SoundPlayNode::LateNoticeReceived, %Ld too late at %Ld\n", howMuch,
                performanceTime);
 
        // is this our output?
-       if (what != mOutput.source) {
-               TRACE("_SoundPlayNode::LateNoticeReceived returning\n");
+       if (what != fOutput.source) {
+               TRACE("SoundPlayNode::LateNoticeReceived returning\n");
                return;
        }
 
@@ -495,31 +496,31 @@
                // that at the moment, so we try to start producing buffers earlier to
                // compensate.
 
-               mInternalLatency += howMuch;
+               fInternalLatency += howMuch;
 
-               if (mInternalLatency > 30000)   // avoid getting a too high latency
-                       mInternalLatency = 30000;
+               if (fInternalLatency > 30000)   // avoid getting a too high latency
+                       fInternalLatency = 30000;
 
-               SetEventLatency(mLatency + mInternalLatency);
-               TRACE("_SoundPlayNode::LateNoticeReceived: increasing latency to %Ld\n", mLatency + mInternalLatency);
+               SetEventLatency(fLatency + fInternalLatency);
+               TRACE("SoundPlayNode::LateNoticeReceived: increasing latency to %Ld\n", fLatency + fInternalLatency);
        } else {
                // The other run modes dictate various strategies for sacrificing data quality
                // in the interests of timely data delivery.  The way *we* do this is to skip
                // a buffer, which catches us up in time by one buffer duration.
 
-               size_t nFrames = mOutput.format.u.raw_audio.buffer_size
-                       / ((mOutput.format.u.raw_audio.format & media_raw_audio_format::B_AUDIO_SIZE_MASK)
-                       * mOutput.format.u.raw_audio.channel_count);
+               size_t nFrames = fOutput.format.u.raw_audio.buffer_size
+                       / ((fOutput.format.u.raw_audio.format & media_raw_audio_format::B_AUDIO_SIZE_MASK)
+                       * fOutput.format.u.raw_audio.channel_count);
 
-               mFramesSent += nFrames;
+               fFramesSent += nFrames;
 
-               TRACE("_SoundPlayNode::LateNoticeReceived: skipping a buffer to try to catch up\n");
+               TRACE("SoundPlayNode::LateNoticeReceived: skipping a buffer to try to catch up\n");
        }
 }
 
 
 void
-_SoundPlayNode::EnableOutput(const media_source& what, bool enabled,
+SoundPlayNode::EnableOutput(const media_source& what, bool enabled,
        int32* /* deprecated */)
 {
        CALLED();
@@ -531,17 +532,17 @@
        // matches, then set the enable state accordingly.
 
        // is this our output?
-       if (what != mOutput.source) {
-               fprintf(stderr, "_SoundPlayNode::EnableOutput returning\n");
+       if (what != fOutput.source) {
+               fprintf(stderr, "SoundPlayNode::EnableOutput returning\n");
                return;
        }
 
-       mOutputEnabled = enabled;
+       fOutputEnabled = enabled;
 }
 
 
 void
-_SoundPlayNode::AdditionalBufferRequested(const media_source& source,
+SoundPlayNode::AdditionalBufferRequested(const media_source& source,
        media_buffer_id previousBuffer, bigtime_t previousTime,
        const media_seek_tag* previousTag)
 {
@@ -552,22 +553,22 @@
 
 
 void
-_SoundPlayNode::LatencyChanged(const media_source& source,
+SoundPlayNode::LatencyChanged(const media_source& source,
        const media_destination& destination, bigtime_t newLatency, uint32 flags)
 {
        CALLED();
 
-       TRACE("_SoundPlayNode::LatencyChanged: new_latency %Ld\n", newLatency);
+       TRACE("SoundPlayNode::LatencyChanged: new_latency %Ld\n", newLatency);
 
        // something downstream changed latency, so we need to start producing
        // buffers earlier (or later) than we were previously.  Make sure that the
        // connection that changed is ours, and adjust to the new downstream
        // latency if so.
-       if (source == mOutput.source && destination == mOutput.destination) {
-               mLatency = newLatency;
-               SetEventLatency(mLatency + mInternalLatency);
+       if (source == fOutput.source && destination == fOutput.destination) {
+               fLatency = newLatency;
+               SetEventLatency(fLatency + fInternalLatency);
        } else {
-               TRACE("_SoundPlayNode::LatencyChanged: ignored\n");
+               TRACE("SoundPlayNode::LatencyChanged: ignored\n");
        }
 }
 
@@ -576,7 +577,7 @@
 
 
 void
-_SoundPlayNode::HandleEvent(const media_timed_event* event, bigtime_t lateness,
+SoundPlayNode::HandleEvent(const media_timed_event* event, bigtime_t lateness,
        bool realTimeEvent)
 {
        CALLED();
@@ -619,7 +620,7 @@
 // how should we handle late buffers?  drop them?
 // notify the producer?
 status_t
-_SoundPlayNode::SendNewBuffer(const media_timed_event* event,
+SoundPlayNode::SendNewBuffer(const media_timed_event* event,
        bigtime_t lateness, bool realTimeEvent)
 {
        CALLED();
@@ -627,7 +628,7 @@
 
        // make sure we're both started *and* connected before delivering a buffer
        if (RunState() != BMediaEventLooper::B_STARTED
-               || mOutput.destination == media_destination::null)
+               || fOutput.destination == media_destination::null)
                return B_OK;
 
        // The event->event_time is the time at which the buffer we are preparing
@@ -637,12 +638,12 @@
        // lateness is independent of EventLatency()!
 
        if (lateness > (BufferDuration() / 3) ) {
-               printf("_SoundPlayNode::SendNewBuffer, event scheduled much too late, "
+               printf("SoundPlayNode::SendNewBuffer, event scheduled much too late, "
                        "lateness is %Ld\n", lateness);
        }
 
        // skip buffer creation if output not enabled
-       if (mOutputEnabled) {
+       if (fOutputEnabled) {
 
                // Get the next buffer of data
                BBuffer* buffer = FillNextBuffer(event->event_time);
@@ -651,26 +652,26 @@
 
                        // If we are ready way too early, decrase internal latency
 /*
-                       bigtime_t how_early = event->event_time - TimeSource()->Now() - mLatency - mInternalLatency;
+                       bigtime_t how_early = event->event_time - TimeSource()->Now() - fLatency - fInternalLatency;
                        if (how_early > 5000) {
 
-                               printf("_SoundPlayNode::SendNewBuffer, event scheduled too early, how_early is %Ld\n", how_early);
+                               printf("SoundPlayNode::SendNewBuffer, event scheduled too early, how_early is %Ld\n", how_early);
 
-                               if (mTooEarlyCount++ == 5) {
-                                       mInternalLatency -= how_early;
-                                       if (mInternalLatency < 500)
-                                               mInternalLatency = 500;
-                                       printf("_SoundPlayNode::SendNewBuffer setting internal latency to %Ld\n", mInternalLatency);
-                                       SetEventLatency(mLatency + mInternalLatency);
-                                       mTooEarlyCount = 0;
+                               if (fTooEarlyCount++ == 5) {
+                                       fInternalLatency -= how_early;
+                                       if (fInternalLatency < 500)
+                                               fInternalLatency = 500;
+                                       printf("SoundPlayNode::SendNewBuffer setting internal latency to %Ld\n", fInternalLatency);
+                                       SetEventLatency(fLatency + fInternalLatency);
+                                       fTooEarlyCount = 0;
                                }
                        }
 */
                        // send the buffer downstream if and only if output is enabled
-                       if (B_OK != SendBuffer(buffer, mOutput.destination)) {
+                       if (B_OK != SendBuffer(buffer, fOutput.destination)) {
                                // we need to recycle the buffer
                                // if the call to SendBuffer() fails
-                               printf("_SoundPlayNode::SendNewBuffer: Buffer sending "
+                               printf("SoundPlayNode::SendNewBuffer: Buffer sending "
                                        "failed\n");
                                buffer->Recycle();
                        }
@@ -678,17 +679,17 @@
        }
 
        // track how much media we've delivered so far
-       size_t nFrames = mOutput.format.u.raw_audio.buffer_size
-               / ((mOutput.format.u.raw_audio.format
+       size_t nFrames = fOutput.format.u.raw_audio.buffer_size
+               / ((fOutput.format.u.raw_audio.format
                        & media_raw_audio_format::B_AUDIO_SIZE_MASK)
-               * mOutput.format.u.raw_audio.channel_count);
-       mFramesSent += nFrames;
+               * fOutput.format.u.raw_audio.channel_count);
+       fFramesSent += nFrames;
 
        // The buffer is on its way; now schedule the next one to go
        // nextEvent is the time at which the buffer should arrive at it's
        // destination
-       bigtime_t nextEvent = mStartTime + bigtime_t((1000000LL * mFramesSent)
-               / (int32)mOutput.format.u.raw_audio.frame_rate);
+       bigtime_t nextEvent = fStartTime + bigtime_t((1000000LL * fFramesSent)
+               / (int32)fOutput.format.u.raw_audio.frame_rate);
        media_timed_event nextBufferEvent(nextEvent, SEND_NEW_BUFFER_EVENT);
        EventQueue()->AddEvent(nextBufferEvent);
 
@@ -697,10 +698,10 @@
 
 
 status_t
-_SoundPlayNode::HandleDataStatus(const media_timed_event* event,
+SoundPlayNode::HandleDataStatus(const media_timed_event* event,
        bigtime_t lateness, bool realTimeEvent)
 {
-       TRACE("_SoundPlayNode::HandleDataStatus status: %li, lateness: %Li\n",
+       TRACE("SoundPlayNode::HandleDataStatus status: %li, lateness: %Li\n",
                event->data, lateness);
 
        switch (event->data) {
@@ -718,7 +719,7 @@
 
 
 status_t
-_SoundPlayNode::HandleStart(const media_timed_event* event, bigtime_t lateness,
+SoundPlayNode::HandleStart(const media_timed_event* event, bigtime_t lateness,
        bool realTimeEvent)
 {
        CALLED();
@@ -727,8 +728,8 @@
                // We want to start sending buffers now, so we set up the buffer-sending
                // bookkeeping and fire off the first "produce a buffer" event.
 
-               mFramesSent = 0;
-               mStartTime = event->event_time;
+               fFramesSent = 0;
+               fStartTime = event->event_time;
                media_timed_event firstBufferEvent(event->event_time,
                        SEND_NEW_BUFFER_EVENT);
 
@@ -744,18 +745,18 @@
 
 
 status_t
-_SoundPlayNode::HandleSeek(const media_timed_event* event, bigtime_t lateness,
+SoundPlayNode::HandleSeek(const media_timed_event* event, bigtime_t lateness,
        bool realTimeEvent)
 {
        CALLED();
-       TRACE("_SoundPlayNode::HandleSeek(t=%lld, d=%li, bd=%lld)\n",
+       TRACE("SoundPlayNode::HandleSeek(t=%lld, d=%li, bd=%lld)\n",
                event->event_time, event->data, event->bigdata);
        return B_OK;
 }
 
 
 status_t
-_SoundPlayNode::HandleWarp(const media_timed_event* event, bigtime_t lateness,
+SoundPlayNode::HandleWarp(const media_timed_event* event, bigtime_t lateness,
        bool realTimeEvent)
 {
        CALLED();
@@ -764,7 +765,7 @@
 
 
 status_t
-_SoundPlayNode::HandleStop(const media_timed_event* event, bigtime_t lateness,
+SoundPlayNode::HandleStop(const media_timed_event* event, bigtime_t lateness,
        bool realTimeEvent)
 {
        CALLED();
@@ -777,7 +778,7 @@
 
 
 status_t
-_SoundPlayNode::HandleParameter(const media_timed_event* event,
+SoundPlayNode::HandleParameter(const media_timed_event* event,
        bigtime_t lateness, bool realTimeEvent)
 {
        CALLED();
@@ -786,60 +787,63 @@
 
 
 void
-_SoundPlayNode::AllocateBuffers()
+SoundPlayNode::AllocateBuffers()
 {
        CALLED();
 
        // allocate enough buffers to span our downstream latency, plus one
-       size_t size = mOutput.format.u.raw_audio.buffer_size;
-       int32 count = int32(mLatency / BufferDuration() + 1 + 1);
+       size_t size = fOutput.format.u.raw_audio.buffer_size;
+       int32 count = int32(fLatency / BufferDuration() + 1 + 1);
 
-       TRACE("_SoundPlayNode::AllocateBuffers: latency = %Ld, buffer duration "
-               "= %Ld, count %ld\n", mLatency, BufferDuration(), count);
+       TRACE("SoundPlayNode::AllocateBuffers: latency = %Ld, buffer duration "
+               "= %Ld, count %ld\n", fLatency, BufferDuration(), count);
 
        if (count < 3)
                count = 3;
 
-       TRACE("_SoundPlayNode::AllocateBuffers: creating group of %ld buffers, "
+       TRACE("SoundPlayNode::AllocateBuffers: creating group of %ld buffers, "
                "size = %lu\n", count, size);
 
-       mBufferGroup = new BBufferGroup(size, count);
-       if (mBufferGroup->InitCheck() != B_OK) {
-               ERROR("_SoundPlayNode::AllocateBuffers: BufferGroup::InitCheck() "
+       fBufferGroup = new BBufferGroup(size, count);
+       if (fBufferGroup->InitCheck() != B_OK) {
+               ERROR("SoundPlayNode::AllocateBuffers: BufferGroup::InitCheck() "
                        "failed\n");
        }
 }
 
 
 BBuffer*
-_SoundPlayNode::FillNextBuffer(bigtime_t event_time)
+SoundPlayNode::FillNextBuffer(bigtime_t eventTime)
 {
        CALLED();
 
        // get a buffer from our buffer group
-       BBuffer* buf = mBufferGroup->RequestBuffer(
-               mOutput.format.u.raw_audio.buffer_size, BufferDuration() / 2);
+       BBuffer* buffer = fBufferGroup->RequestBuffer(
+               fOutput.format.u.raw_audio.buffer_size, BufferDuration() / 2);
 
        // If we fail to get a buffer (for example, if the request times out), we
        // skip this buffer and go on to the next, to avoid locking up the control
        // thread
-       if (!buf) {
-               ERROR("_SoundPlayNode::FillNextBuffer: RequestBuffer failed\n");
+       if (buffer == NULL) {
+               ERROR("SoundPlayNode::FillNextBuffer: RequestBuffer failed\n");
                return NULL;
        }
 
-       if (mPlayer->HasData()) {
-               mPlayer->_PlayBuffer(buf->Data(),
-                       mOutput.format.u.raw_audio.buffer_size, mOutput.format.u.raw_audio);
-       } else {
-               memset(buf->Data(), 0, mOutput.format.u.raw_audio.buffer_size);
-       }
+       if (fPlayer->HasData()) {
+               fPlayer->_PlayBuffer(buffer->Data(),
+                       fOutput.format.u.raw_audio.buffer_size, fOutput.format.u.raw_audio);
+       } else
+               memset(buffer->Data(), 0, fOutput.format.u.raw_audio.buffer_size);
 
        // fill in the buffer header
-       media_header* hdr = buf->Header();
-       hdr->type = B_MEDIA_RAW_AUDIO;
-       hdr->size_used = mOutput.format.u.raw_audio.buffer_size;
-       hdr->time_source = TimeSource()->ID();
-       hdr->start_time = event_time;
-       return buf;
+       media_header* header = buffer->Header();
+       header->type = B_MEDIA_RAW_AUDIO;
+       header->size_used = fOutput.format.u.raw_audio.buffer_size;
+       header->time_source = TimeSource()->ID();
+       header->start_time = eventTime;
+
+       return buffer;
 }
+
+
+}      // namespace BPrivate

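For reference, the buffer math in Connect() and AllocateBuffers() above works
out as follows. A small stand-alone example with assumed numbers (the 44.1 kHz
stereo float format, 4096 byte buffers and 25 ms downstream latency are
illustrative values, not taken from this commit):

	#include <stdint.h>
	#include <stdio.h>

	int
	main()
	{
		// hypothetical connection format: 4096 byte buffers, 32 bit float
		// samples (4 bytes each, i.e. format & B_AUDIO_SIZE_MASK == 4),
		// 2 channels, 44100 Hz, 25000 us downstream latency
		int64_t bufferSize = 4096;
		int64_t sampleSize = 4;
		int64_t channelCount = 2;
		int64_t frameRate = 44100;
		int64_t latency = 25000;

		// microseconds per buffer, using the same integer arithmetic as
		// SoundPlayNode::Connect()
		int64_t duration = (bufferSize * 1000000LL)
			/ (sampleSize * channelCount) / frameRate;

		// same policy as SoundPlayNode::AllocateBuffers(): enough buffers
		// to span the downstream latency, plus one, but never fewer than 3
		int32_t count = int32_t(latency / duration + 1 + 1);
		if (count < 3)
			count = 3;

		printf("buffer duration %lld us, buffer count %d\n",
			(long long)duration, (int)count);
			// prints: buffer duration 11609 us, buffer count 4
		return 0;
	}

With these numbers the node would schedule a new buffer roughly every 11.6 ms
and keep four buffers in flight.
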
Modified: haiku/trunk/src/kits/media/SoundPlayNode.h
===================================================================
--- haiku/trunk/src/kits/media/SoundPlayNode.h  2009-12-03 21:59:08 UTC (rev 34481)
+++ haiku/trunk/src/kits/media/SoundPlayNode.h  2009-12-03 22:16:54 UTC (rev 34482)
@@ -1,177 +1,152 @@
-#ifndef _SOUND_PLAY_NODE_
-#define _SOUND_PLAY_NODE_
+/*
+ * Copyright 2002-2009, Haiku.
+ * Distributed under the terms of the MIT License.
+ *
+ * Authors:
+ *             Marcus Overhagen
+ *             Jérôme Duval
+ */
+#ifndef _SOUND_PLAY_NODE_H
+#define _SOUND_PLAY_NODE_H
 
-#include <BufferProducer.h>
-#include <MediaEventLooper.h>
+
 #include <Buffer.h>
 #include <BufferGroup.h>
-#include "SoundPlayer.h"
+#include <BufferProducer.h>
+#include <MediaEventLooper.h>
+#include <SoundPlayer.h>
 
-/***********************************************************************
- * AUTHOR: Marcus Overhagen, Jérôme Duval
- *   FILE: SoundPlayNode.h
- *  DESCR: This is the BBufferProducer, used internally by BSoundPlayer
- *         This belongs into a private namespace, but isn't for 
- *         compatibility reasons.
- ***********************************************************************/
 
-class _SoundPlayNode 
-       : public BBufferProducer, public BMediaEventLooper
-{
+namespace BPrivate {
+
+
+class SoundPlayNode : public BBufferProducer, public BMediaEventLooper {
 public:
-       _SoundPlayNode(const char *name, BSoundPlayer *player);
-       ~_SoundPlayNode();
-       
-       bool IsPlaying();
-       bigtime_t CurrentTime();
-       
-/*************************/
-/* begin from BMediaNode */
-public:
-virtual        BMediaAddOn* AddOn(
-                               int32 * internal_id) const;     /* Who instantiated you -- or NULL for app class */
+                                                               SoundPlayNode(const char* name,
+                                                                       BSoundPlayer* player);
+       virtual                                         ~SoundPlayNode();
 
+                       bool                            IsPlaying();
+                       bigtime_t                       CurrentTime();
+
+       // BMediaNode methods
+
+       virtual BMediaAddOn*            AddOn(int32* _internalID) const;

[... truncated: 345 lines follow ...]
