Merge pull request #51296 from ellenhp/mix_in_audio_server

Move mixing out of the AudioStreamPlayback* nodes
This commit is contained in:
Juan Linietsky 2021-08-27 15:38:20 -03:00 committed by GitHub
commit 54caaa21ce
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
22 changed files with 1494 additions and 958 deletions

375
core/templates/safe_list.h Normal file
View file

@ -0,0 +1,375 @@
/*************************************************************************/
/* safe_list.h */
/*************************************************************************/
/* This file is part of: */
/* GODOT ENGINE */
/* https://godotengine.org */
/*************************************************************************/
/* Copyright (c) 2007-2021 Juan Linietsky, Ariel Manzur. */
/* Copyright (c) 2014-2021 Godot Engine contributors (cf. AUTHORS.md). */
/* */
/* Permission is hereby granted, free of charge, to any person obtaining */
/* a copy of this software and associated documentation files (the */
/* "Software"), to deal in the Software without restriction, including */
/* without limitation the rights to use, copy, modify, merge, publish, */
/* distribute, sublicense, and/or sell copies of the Software, and to */
/* permit persons to whom the Software is furnished to do so, subject to */
/* the following conditions: */
/* */
/* The above copyright notice and this permission notice shall be */
/* included in all copies or substantial portions of the Software. */
/* */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
/*************************************************************************/
#ifndef SAFE_LIST_H
#define SAFE_LIST_H
#include "core/os/memory.h"
#include "core/typedefs.h"
#include <functional>
#if !defined(NO_THREADS)
#include <atomic>
#include <type_traits>
// Design goals for these classes:
// - Accessing this list with an iterator will never result in a use-after free,
// even if the element being accessed has been logically removed from the list on
// another thread.
// - Logical deletion from the list will not result in deallocation at that time,
// instead the node will be deallocated at a later time when it is safe to do so.
// - No blocking synchronization primitives will be used.
// This is used in very specific areas of the engine where it's critical that these guarantees are held.
// Lock-free forward list (see the design goals above): iterators never observe
// freed memory, erase() only unlinks nodes and defers deallocation to
// maybe_cleanup(), and only CAS retry loops (no blocking primitives) are used.
template <class T, class A = DefaultAllocator>
class SafeList {
	struct SafeListNode {
		std::atomic<SafeListNode *> next = nullptr;

		// If the node is logically deleted, this pointer will typically point
		// to the previous list item in time that was also logically deleted.
		std::atomic<SafeListNode *> graveyard_next = nullptr;

		// Invoked on `val` just before the node is deallocated in
		// maybe_cleanup(); defaults to a no-op.
		std::function<void(T)> deletion_fn = [](T t) { return; };

		T val;
	};

	// Stored values must be lock-free atomics, otherwise the non-blocking
	// guarantee of this container could not hold.
	static_assert(std::atomic<T>::is_always_lock_free);

	std::atomic<SafeListNode *> head = nullptr;

	// Head of the stack (linked via graveyard_next) of logically deleted
	// nodes awaiting physical deletion in maybe_cleanup().
	std::atomic<SafeListNode *> graveyard_head = nullptr;

	// Number of live Iterator instances; cleanup is deferred while non-zero.
	std::atomic_uint active_iterator_count = 0;

public:
	class Iterator {
		friend class SafeList;

		SafeListNode *cursor;
		SafeList *list;

		Iterator(SafeListNode *p_cursor, SafeList *p_list) :
				cursor(p_cursor), list(p_list) {
			list->active_iterator_count++;
		}

	public:
		Iterator(const Iterator &p_other) :
				cursor(p_other.cursor), list(p_other.list) {
			// Copies pin the list as well, so maybe_cleanup() stays deferred
			// for as long as any copy is alive.
			list->active_iterator_count++;
		}

		~Iterator() {
			list->active_iterator_count--;
		}

	public:
		T &operator*() {
			return cursor->val;
		}

		Iterator &operator++() {
			cursor = cursor->next;
			return *this;
		}

		// These two operators are mostly useful for comparisons to nullptr.
		bool operator==(const void *p_other) const {
			return cursor == p_other;
		}

		bool operator!=(const void *p_other) const {
			return cursor != p_other;
		}

		// These two allow easy range-based for loops.
		bool operator==(const Iterator &p_other) const {
			return cursor == p_other.cursor;
		}

		bool operator!=(const Iterator &p_other) const {
			return cursor != p_other.cursor;
		}
	};

public:
	// Calling this will cause an allocation.
	// Pushes the new node at the front of the list via a CAS retry loop.
	void insert(T p_value) {
		SafeListNode *new_node = memnew_allocator(SafeListNode, A);
		new_node->val = p_value;
		SafeListNode *expected_head = nullptr;
		do {
			expected_head = head.load();
			new_node->next.store(expected_head);
		} while (!head.compare_exchange_strong(/* expected= */ expected_head, /* new= */ new_node));
	}

	// Linear scan; returns end() if the value is not present.
	Iterator find(T p_value) {
		for (Iterator it = begin(); it != end(); ++it) {
			if (*it == p_value) {
				return it;
			}
		}
		return end();
	}

	void erase(T p_value, std::function<void(T)> p_deletion_fn) {
		// NOTE(review): if p_value is absent, find() returns end() and the
		// erase(Iterator &, ...) overload below dereferences a null cursor --
		// confirm callers only erase values known to be present.
		Iterator tmp = find(p_value);
		erase(tmp, p_deletion_fn);
	}

	void erase(T p_value) {
		Iterator tmp = find(p_value);
		erase(tmp, [](T t) { return; });
	}

	void erase(Iterator &p_iterator, std::function<void(T)> p_deletion_fn) {
		// Remember the custom deleter; it runs later in maybe_cleanup().
		p_iterator.cursor->deletion_fn = p_deletion_fn;
		erase(p_iterator);
	}

	void erase(Iterator &p_iterator) {
		if (find(p_iterator.cursor->val) == nullptr) {
			// Not in the list, nothing to do.
			return;
		}
		// First, remove the node from the list.
		while (true) {
			Iterator prev = begin();
			SafeListNode *expected_head = prev.cursor;
			for (; prev != end(); ++prev) {
				if (prev.cursor && prev.cursor->next == p_iterator.cursor) {
					break;
				}
			}
			if (prev != end()) {
				// There exists a node before this.
				prev.cursor->next.store(p_iterator.cursor->next.load());
				// Done.
				break;
			} else {
				// No predecessor found, so the node should be the current head:
				// try to swing `head` past it atomically.
				if (head.compare_exchange_strong(/* expected= */ expected_head, /* new= */ p_iterator.cursor->next.load())) {
					// Successfully reassigned the head pointer before another thread changed it to something else.
					break;
				}
				// Fall through upon failure, try again.
			}
		}
		// Then queue it for deletion by putting it in the node graveyard
		// (CAS push onto the graveyard stack).
		// Don't touch `next` because an iterator might still be pointing at this node.
		SafeListNode *expected_head = nullptr;
		do {
			expected_head = graveyard_head.load();
			p_iterator.cursor->graveyard_next.store(expected_head);
		} while (!graveyard_head.compare_exchange_strong(/* expected= */ expected_head, /* new= */ p_iterator.cursor));
	}

	Iterator begin() {
		return Iterator(head.load(), this);
	}

	Iterator end() {
		return Iterator(nullptr, this);
	}

	// Calling this will cause zero to many deallocations.
	void maybe_cleanup() {
		SafeListNode *cursor = nullptr;
		SafeListNode *new_graveyard_head = nullptr;
		do {
			// The access order here is theoretically important: the graveyard
			// head is sampled before the iterator count is checked.
			cursor = graveyard_head.load();
			if (active_iterator_count.load() != 0) {
				// It's not safe to clean up with an active iterator, because that iterator
				// could be pointing to an element that we want to delete.
				return;
			}
			// Any iterator created after this point will never point to a deleted node.
			// Swap it out with the current graveyard head.
		} while (!graveyard_head.compare_exchange_strong(/* expected= */ cursor, /* new= */ new_graveyard_head));
		// Our graveyard list is now unreachable by any active iterators,
		// detached from the main graveyard head and ready for deletion.
		while (cursor) {
			SafeListNode *tmp = cursor;
			cursor = cursor->graveyard_next;
			tmp->deletion_fn(tmp->val);
			memdelete_allocator<SafeListNode, A>(tmp);
		}
	}
};
#else // NO_THREADS
// Effectively the same structure without the atomics. It's probably possible to simplify it but the semantics shouldn't differ greatly.
template <class T, class A = DefaultAllocator>
class SafeList {
	struct SafeListNode {
		SafeListNode *next = nullptr;

		// If the node is logically deleted, this pointer will typically point to the previous list item in time that was also logically deleted.
		SafeListNode *graveyard_next = nullptr;

		// Invoked on `val` just before the node is deallocated in
		// maybe_cleanup(); defaults to a no-op.
		std::function<void(T)> deletion_fn = [](T t) { return; };

		T val;
	};

	SafeListNode *head = nullptr;

	// Head of the stack (linked via graveyard_next) of logically deleted
	// nodes awaiting deallocation in maybe_cleanup().
	SafeListNode *graveyard_head = nullptr;

	// Number of live Iterator instances; cleanup is deferred while non-zero.
	unsigned int active_iterator_count = 0;

public:
	class Iterator {
		friend class SafeList;

		SafeListNode *cursor;
		SafeList *list;

	public:
		Iterator(SafeListNode *p_cursor, SafeList *p_list) :
				cursor(p_cursor), list(p_list) {
			list->active_iterator_count++;
		}

		// The copy constructor must also pin the list. Without it, the
		// implicit copy made by pass-by-value erase() would not increment the
		// counter, but its destructor would still decrement it, underflowing
		// the unsigned count and blocking cleanup forever.
		Iterator(const Iterator &p_other) :
				cursor(p_other.cursor), list(p_other.list) {
			list->active_iterator_count++;
		}

		~Iterator() {
			list->active_iterator_count--;
		}

		T &operator*() {
			return cursor->val;
		}

		Iterator &operator++() {
			cursor = cursor->next;
			return *this;
		}

		// These two operators are mostly useful for comparisons to nullptr.
		bool operator==(const void *p_other) const {
			return cursor == p_other;
		}

		bool operator!=(const void *p_other) const {
			return cursor != p_other;
		}

		// These two allow easy range-based for loops.
		bool operator==(const Iterator &p_other) const {
			return cursor == p_other.cursor;
		}

		bool operator!=(const Iterator &p_other) const {
			return cursor != p_other.cursor;
		}
	};

public:
	// Calling this will cause an allocation.
	void insert(T p_value) {
		SafeListNode *new_node = memnew_allocator(SafeListNode, A);
		new_node->val = p_value;
		new_node->next = head;
		head = new_node;
	}

	// Linear scan; returns end() if the value is not present.
	Iterator find(T p_value) {
		for (Iterator it = begin(); it != end(); ++it) {
			if (*it == p_value) {
				return it;
			}
		}
		return end();
	}

	void erase(T p_value, std::function<void(T)> p_deletion_fn) {
		erase(find(p_value), p_deletion_fn);
	}

	void erase(T p_value) {
		erase(find(p_value), [](T t) { return; });
	}

	void erase(Iterator p_iterator, std::function<void(T)> p_deletion_fn) {
		if (p_iterator == end()) {
			// Not in the list (e.g. find() above failed), nothing to do.
			// Guard before touching the cursor to avoid a null dereference.
			return;
		}
		p_iterator.cursor->deletion_fn = p_deletion_fn;
		erase(p_iterator);
	}

	void erase(Iterator p_iterator) {
		if (p_iterator == end()) {
			// Not in the list, nothing to do.
			return;
		}
		// First, remove the node from the list.
		if (p_iterator.cursor == head) {
			// The head has no predecessor to patch; advance the head instead.
			// (A predecessor scan alone would silently fail to erase the head.)
			head = head->next;
		} else {
			Iterator prev = begin();
			for (; prev != end(); ++prev) {
				if (prev.cursor && prev.cursor->next == p_iterator.cursor) {
					break;
				}
			}
			if (prev == end()) {
				// Not in the list, nothing to do.
				return;
			}
			prev.cursor->next = p_iterator.cursor->next;
		}
		// Then queue it for deletion by putting it in the node graveyard. Don't touch `next` because an iterator might still be pointing at this node.
		p_iterator.cursor->graveyard_next = graveyard_head;
		graveyard_head = p_iterator.cursor;
	}

	Iterator begin() {
		return Iterator(head, this);
	}

	Iterator end() {
		return Iterator(nullptr, this);
	}

	// Calling this will cause zero to many deallocations.
	void maybe_cleanup() {
		SafeListNode *cursor = graveyard_head;
		if (active_iterator_count != 0) {
			// It's not safe to clean up with an active iterator, because that iterator could be pointing to an element that we want to delete.
			return;
		}
		graveyard_head = nullptr;
		// Our graveyard list is now unreachable by any active iterators, detached from the main graveyard head and ready for deletion.
		while (cursor) {
			SafeListNode *tmp = cursor;
			// Follow the graveyard chain, not the stale main-list `next`
			// pointer (graveyard insertion above links via graveyard_next;
			// following `next` could reach and free live nodes).
			cursor = cursor->graveyard_next;
			tmp->deletion_fn(tmp->val);
			memdelete_allocator<SafeListNode, A>(tmp);
		}
	}
};
#endif
#endif // SAFE_LIST_H

View file

@ -26,7 +26,7 @@
</description>
</method>
<method name="_mix" qualifiers="virtual">
<return type="void" />
<return type="int" />
<argument index="0" name="buffer" type="AudioFrame*" />
<argument index="1" name="rate_scale" type="float" />
<argument index="2" name="frames" type="int" />

View file

@ -37,11 +37,13 @@
#include "core/io/file_access.h"
void AudioStreamPlaybackMP3::_mix_internal(AudioFrame *p_buffer, int p_frames) {
ERR_FAIL_COND(!active);
int AudioStreamPlaybackMP3::_mix_internal(AudioFrame *p_buffer, int p_frames) {
ERR_FAIL_COND_V(!active, 0);
int todo = p_frames;
int frames_mixed_this_step = p_frames;
while (todo && active) {
mp3dec_frame_info_t frame_info;
mp3d_sample_t *buf_frame = nullptr;
@ -60,6 +62,7 @@ void AudioStreamPlaybackMP3::_mix_internal(AudioFrame *p_buffer, int p_frames) {
seek(mp3_stream->loop_offset);
loops++;
} else {
frames_mixed_this_step = p_frames - todo;
//fill remainder with silence
for (int i = p_frames - todo; i < p_frames; i++) {
p_buffer[i] = AudioFrame(0, 0);
@ -69,6 +72,7 @@ void AudioStreamPlaybackMP3::_mix_internal(AudioFrame *p_buffer, int p_frames) {
}
}
}
return frames_mixed_this_step;
}
float AudioStreamPlaybackMP3::get_stream_sampling_rate() {

View file

@ -51,7 +51,7 @@ class AudioStreamPlaybackMP3 : public AudioStreamPlaybackResampled {
Ref<AudioStreamMP3> mp3_stream;
protected:
virtual void _mix_internal(AudioFrame *p_buffer, int p_frames) override;
virtual int _mix_internal(AudioFrame *p_buffer, int p_frames) override;
virtual float get_stream_sampling_rate() override;
public:

View file

@ -32,13 +32,15 @@
#include "core/io/file_access.h"
void AudioStreamPlaybackOGGVorbis::_mix_internal(AudioFrame *p_buffer, int p_frames) {
ERR_FAIL_COND(!active);
int AudioStreamPlaybackOGGVorbis::_mix_internal(AudioFrame *p_buffer, int p_frames) {
ERR_FAIL_COND_V(!active, 0);
int todo = p_frames;
int start_buffer = 0;
int frames_mixed_this_step = p_frames;
while (todo && active) {
float *buffer = (float *)p_buffer;
if (start_buffer > 0) {
@ -64,6 +66,7 @@ void AudioStreamPlaybackOGGVorbis::_mix_internal(AudioFrame *p_buffer, int p_fra
// we still have buffer to fill, start from this element in the next iteration.
start_buffer = p_frames - todo;
} else {
frames_mixed_this_step = p_frames - todo;
for (int i = p_frames - todo; i < p_frames; i++) {
p_buffer[i] = AudioFrame(0, 0);
}
@ -72,6 +75,7 @@ void AudioStreamPlaybackOGGVorbis::_mix_internal(AudioFrame *p_buffer, int p_fra
}
}
}
return frames_mixed_this_step;
}
float AudioStreamPlaybackOGGVorbis::get_stream_sampling_rate() {

View file

@ -52,7 +52,7 @@ class AudioStreamPlaybackOGGVorbis : public AudioStreamPlaybackResampled {
Ref<AudioStreamOGGVorbis> vorbis_stream;
protected:
virtual void _mix_internal(AudioFrame *p_buffer, int p_frames) override;
virtual int _mix_internal(AudioFrame *p_buffer, int p_frames) override;
virtual float get_stream_sampling_rate() override;
public:

View file

@ -33,125 +33,17 @@
#include "scene/2d/area_2d.h"
#include "scene/main/window.h"
void AudioStreamPlayer2D::_mix_audio() {
if (!stream_playback.is_valid() || !active.is_set() ||
(stream_paused && !stream_paused_fade_out)) {
return;
}
if (setseek.get() >= 0.0) {
stream_playback->start(setseek.get());
setseek.set(-1.0); //reset seek
}
//get data
AudioFrame *buffer = mix_buffer.ptrw();
int buffer_size = mix_buffer.size();
if (stream_paused_fade_out) {
// Short fadeout ramp
buffer_size = MIN(buffer_size, 128);
}
stream_playback->mix(buffer, pitch_scale, buffer_size);
//write all outputs
int oc = output_count.get();
for (int i = 0; i < oc; i++) {
Output current = outputs[i];
//see if current output exists, to keep volume ramp
bool found = false;
for (int j = i; j < prev_output_count; j++) {
if (prev_outputs[j].viewport == current.viewport) {
if (j != i) {
SWAP(prev_outputs[j], prev_outputs[i]);
}
found = true;
break;
}
}
if (!found) {
//create new if was not used before
if (prev_output_count < MAX_OUTPUTS) {
prev_outputs[prev_output_count] = prev_outputs[i]; //may be owned by another viewport
prev_output_count++;
}
prev_outputs[i] = current;
}
//mix!
AudioFrame target_volume = stream_paused_fade_out ? AudioFrame(0.f, 0.f) : current.vol;
AudioFrame vol_prev = stream_paused_fade_in ? AudioFrame(0.f, 0.f) : prev_outputs[i].vol;
AudioFrame vol_inc = (target_volume - vol_prev) / float(buffer_size);
AudioFrame vol = vol_prev;
int cc = AudioServer::get_singleton()->get_channel_count();
if (cc == 1) {
if (!AudioServer::get_singleton()->thread_has_channel_mix_buffer(current.bus_index, 0)) {
continue; //may have been removed
}
AudioFrame *target = AudioServer::get_singleton()->thread_get_channel_mix_buffer(current.bus_index, 0);
for (int j = 0; j < buffer_size; j++) {
target[j] += buffer[j] * vol;
vol += vol_inc;
}
} else {
AudioFrame *targets[4];
bool valid = true;
for (int k = 0; k < cc; k++) {
if (!AudioServer::get_singleton()->thread_has_channel_mix_buffer(current.bus_index, k)) {
valid = false; //may have been removed
break;
}
targets[k] = AudioServer::get_singleton()->thread_get_channel_mix_buffer(current.bus_index, k);
}
if (!valid) {
continue;
}
for (int j = 0; j < buffer_size; j++) {
AudioFrame frame = buffer[j] * vol;
for (int k = 0; k < cc; k++) {
targets[k][j] += frame;
}
vol += vol_inc;
}
}
prev_outputs[i] = current;
}
prev_output_count = oc;
//stream is no longer active, disable this.
if (!stream_playback->is_playing()) {
active.clear();
}
output_ready.clear();
stream_paused_fade_in = false;
stream_paused_fade_out = false;
}
void AudioStreamPlayer2D::_notification(int p_what) {
if (p_what == NOTIFICATION_ENTER_TREE) {
AudioServer::get_singleton()->add_callback(_mix_audios, this);
AudioServer::get_singleton()->add_listener_changed_callback(_listener_changed_cb, this);
if (autoplay && !Engine::get_singleton()->is_editor_hint()) {
play();
}
}
if (p_what == NOTIFICATION_EXIT_TREE) {
AudioServer::get_singleton()->remove_callback(_mix_audios, this);
stop();
AudioServer::get_singleton()->remove_listener_changed_callback(_listener_changed_cb, this);
}
if (p_what == NOTIFICATION_PAUSED) {
@ -168,109 +60,120 @@ void AudioStreamPlayer2D::_notification(int p_what) {
if (p_what == NOTIFICATION_INTERNAL_PHYSICS_PROCESS) {
//update anything related to position first, if possible of course
if (!output_ready.is_set()) {
Ref<World2D> world_2d = get_world_2d();
ERR_FAIL_COND(world_2d.is_null());
int new_output_count = 0;
Vector2 global_pos = get_global_position();
int bus_index = AudioServer::get_singleton()->thread_find_bus_index(bus);
//check if any area is diverting sound into a bus
PhysicsDirectSpaceState2D *space_state = PhysicsServer2D::get_singleton()->space_get_direct_state(world_2d->get_space());
PhysicsDirectSpaceState2D::ShapeResult sr[MAX_INTERSECT_AREAS];
int areas = space_state->intersect_point(global_pos, sr, MAX_INTERSECT_AREAS, Set<RID>(), area_mask, false, true);
for (int i = 0; i < areas; i++) {
Area2D *area2d = Object::cast_to<Area2D>(sr[i].collider);
if (!area2d) {
continue;
}
if (!area2d->is_overriding_audio_bus()) {
continue;
}
StringName bus_name = area2d->get_audio_bus_name();
bus_index = AudioServer::get_singleton()->thread_find_bus_index(bus_name);
break;
if (!stream_playback.is_valid()) {
return;
}
if (setplay.get() >= 0 || (active.is_set() && last_mix_count != AudioServer::get_singleton()->get_mix_count())) {
_update_panning();
if (setplay.get() >= 0) {
active.set();
AudioServer::get_singleton()->start_playback_stream(stream_playback, _get_actual_bus(), volume_vector, setplay.get());
setplay.set(-1);
}
const Set<Viewport *> viewports = world_2d->get_viewports();
for (Set<Viewport *>::Element *E = viewports.front(); E; E = E->next()) {
Viewport *vp = E->get();
if (vp->is_audio_listener_2d()) {
//compute matrix to convert to screen
Transform2D to_screen = vp->get_global_canvas_transform() * vp->get_canvas_transform();
Vector2 screen_size = vp->get_visible_rect().size;
//screen in global is used for attenuation
Vector2 screen_in_global = to_screen.affine_inverse().xform(screen_size * 0.5);
float dist = global_pos.distance_to(screen_in_global); //distance to screen center
if (dist > max_distance) {
continue; //can't hear this sound in this viewport
}
float multiplier = Math::pow(1.0f - dist / max_distance, attenuation);
multiplier *= Math::db2linear(volume_db); //also apply player volume!
//point in screen is used for panning
Vector2 point_in_screen = to_screen.xform(global_pos);
float pan = CLAMP(point_in_screen.x / screen_size.width, 0.0, 1.0);
float l = 1.0 - pan;
float r = pan;
outputs[new_output_count].vol = AudioFrame(l, r) * multiplier;
outputs[new_output_count].bus_index = bus_index;
outputs[new_output_count].viewport = vp; //keep pointer only for reference
new_output_count++;
if (new_output_count == MAX_OUTPUTS) {
break;
}
}
}
output_count.set(new_output_count);
output_ready.set();
}
//start playing if requested
if (setplay.get() >= 0.0) {
setseek.set(setplay.get());
active.set();
setplay.set(-1);
}
//stop playing if no longer active
if (!active.is_set()) {
// Stop playing if no longer active.
if (active.is_set() && !AudioServer::get_singleton()->is_playback_active(stream_playback)) {
active.clear();
set_physics_process_internal(false);
emit_signal(SNAME("finished"));
}
}
}
void AudioStreamPlayer2D::set_stream(Ref<AudioStream> p_stream) {
AudioServer::get_singleton()->lock();
mix_buffer.resize(AudioServer::get_singleton()->thread_get_mix_buffer_size());
if (stream_playback.is_valid()) {
stream_playback.unref();
stream.unref();
active.clear();
setseek.set(-1);
StringName AudioStreamPlayer2D::_get_actual_bus() {
if (!stream_playback.is_valid()) {
return SNAME("Master");
}
Vector2 global_pos = get_global_position();
//check if any area is diverting sound into a bus
Ref<World2D> world_2d = get_world_2d();
ERR_FAIL_COND_V(world_2d.is_null(), SNAME("Master"));
PhysicsDirectSpaceState2D *space_state = PhysicsServer2D::get_singleton()->space_get_direct_state(world_2d->get_space());
PhysicsDirectSpaceState2D::ShapeResult sr[MAX_INTERSECT_AREAS];
int areas = space_state->intersect_point(global_pos, sr, MAX_INTERSECT_AREAS, Set<RID>(), area_mask, false, true);
for (int i = 0; i < areas; i++) {
Area2D *area2d = Object::cast_to<Area2D>(sr[i].collider);
if (!area2d) {
continue;
}
if (!area2d->is_overriding_audio_bus()) {
continue;
}
return area2d->get_audio_bus_name();
}
return default_bus;
}
void AudioStreamPlayer2D::_update_panning() {
if (!stream_playback.is_valid()) {
return;
}
last_mix_count = AudioServer::get_singleton()->get_mix_count();
Ref<World2D> world_2d = get_world_2d();
ERR_FAIL_COND(world_2d.is_null());
Vector2 global_pos = get_global_position();
Set<Viewport *> viewports = world_2d->get_viewports();
viewports.insert(get_viewport()); // TODO: This is a mediocre workaround for #50958. Remove when that bug is fixed!
volume_vector.resize(4);
volume_vector.write[0] = AudioFrame(0, 0);
volume_vector.write[1] = AudioFrame(0, 0);
volume_vector.write[2] = AudioFrame(0, 0);
volume_vector.write[3] = AudioFrame(0, 0);
for (Viewport *vp : viewports) {
if (!vp->is_audio_listener_2d()) {
continue;
}
//compute matrix to convert to screen
Transform2D to_screen = vp->get_global_canvas_transform() * vp->get_canvas_transform();
Vector2 screen_size = vp->get_visible_rect().size;
//screen in global is used for attenuation
Vector2 screen_in_global = to_screen.affine_inverse().xform(screen_size * 0.5);
float dist = global_pos.distance_to(screen_in_global); //distance to screen center
if (dist > max_distance) {
continue; //can't hear this sound in this viewport
}
float multiplier = Math::pow(1.0f - dist / max_distance, attenuation);
multiplier *= Math::db2linear(volume_db); //also apply player volume!
//point in screen is used for panning
Vector2 point_in_screen = to_screen.xform(global_pos);
float pan = CLAMP(point_in_screen.x / screen_size.width, 0.0, 1.0);
float l = 1.0 - pan;
float r = pan;
volume_vector.write[0] = AudioFrame(l, r) * multiplier;
}
AudioServer::get_singleton()->set_playback_bus_exclusive(stream_playback, _get_actual_bus(), volume_vector);
}
void AudioStreamPlayer2D::set_stream(Ref<AudioStream> p_stream) {
if (stream_playback.is_valid()) {
stop();
}
stream_playback.unref();
stream.unref();
if (p_stream.is_valid()) {
stream_playback = p_stream->instance_playback();
if (stream_playback.is_valid()) {
@ -280,8 +183,6 @@ void AudioStreamPlayer2D::set_stream(Ref<AudioStream> p_stream) {
}
}
AudioServer::get_singleton()->unlock();
if (p_stream.is_valid() && stream_playback.is_null()) {
stream.unref();
}
@ -302,6 +203,9 @@ float AudioStreamPlayer2D::get_volume_db() const {
void AudioStreamPlayer2D::set_pitch_scale(float p_pitch_scale) {
ERR_FAIL_COND(p_pitch_scale <= 0.0);
pitch_scale = p_pitch_scale;
if (stream_playback.is_valid()) {
AudioServer::get_singleton()->set_playback_pitch_scale(stream_playback, p_pitch_scale);
}
}
float AudioStreamPlayer2D::get_pitch_scale() const {
@ -309,27 +213,26 @@ float AudioStreamPlayer2D::get_pitch_scale() const {
}
void AudioStreamPlayer2D::play(float p_from_pos) {
if (!is_playing()) {
// Reset the prev_output_count if the stream is stopped
prev_output_count = 0;
stop();
if (stream.is_valid()) {
stream_playback = stream->instance_playback();
}
if (stream_playback.is_valid()) {
setplay.set(p_from_pos);
output_ready.clear();
set_physics_process_internal(true);
}
}
void AudioStreamPlayer2D::seek(float p_seconds) {
if (stream_playback.is_valid()) {
setseek.set(p_seconds);
if (stream_playback.is_valid() && active.is_set()) {
play(p_seconds);
}
}
void AudioStreamPlayer2D::stop() {
if (stream_playback.is_valid()) {
active.clear();
AudioServer::get_singleton()->stop_playback_stream(stream_playback);
set_physics_process_internal(false);
setplay.set(-1);
}
@ -337,7 +240,7 @@ void AudioStreamPlayer2D::stop() {
bool AudioStreamPlayer2D::is_playing() const {
if (stream_playback.is_valid()) {
return active.is_set() || setplay.get() >= 0;
return AudioServer::get_singleton()->is_playback_active(stream_playback);
}
return false;
@ -345,30 +248,23 @@ bool AudioStreamPlayer2D::is_playing() const {
float AudioStreamPlayer2D::get_playback_position() {
if (stream_playback.is_valid()) {
float ss = setseek.get();
if (ss >= 0.0) {
return ss;
}
return stream_playback->get_playback_position();
return AudioServer::get_singleton()->get_playback_position(stream_playback);
}
return 0;
}
void AudioStreamPlayer2D::set_bus(const StringName &p_bus) {
//if audio is active, must lock this
AudioServer::get_singleton()->lock();
bus = p_bus;
AudioServer::get_singleton()->unlock();
default_bus = p_bus; // This will be pushed to the audio server during the next physics timestep, which is fast enough.
}
StringName AudioStreamPlayer2D::get_bus() const {
for (int i = 0; i < AudioServer::get_singleton()->get_bus_count(); i++) {
if (AudioServer::get_singleton()->get_bus_name(i) == bus) {
return bus;
if (AudioServer::get_singleton()->get_bus_name(i) == default_bus) {
return default_bus;
}
}
return "Master";
return SNAME("Master");
}
void AudioStreamPlayer2D::set_autoplay(bool p_enable) {
@ -388,7 +284,11 @@ void AudioStreamPlayer2D::_set_playing(bool p_enable) {
}
bool AudioStreamPlayer2D::_is_active() const {
return active.is_set();
if (stream_playback.is_valid()) {
// TODO make sure this doesn't change any behavior w.r.t. pauses. Is a paused stream active?
return AudioServer::get_singleton()->is_playback_active(stream_playback);
}
return false;
}
void AudioStreamPlayer2D::_validate_property(PropertyInfo &property) const {
@ -436,15 +336,17 @@ uint32_t AudioStreamPlayer2D::get_area_mask() const {
}
void AudioStreamPlayer2D::set_stream_paused(bool p_pause) {
if (p_pause != stream_paused) {
stream_paused = p_pause;
stream_paused_fade_in = !p_pause;
stream_paused_fade_out = p_pause;
// TODO this does not have perfect recall, fix that maybe? If the stream isn't set, we can't persist this bool.
if (stream_playback.is_valid()) {
AudioServer::get_singleton()->set_playback_paused(stream_playback, p_pause);
}
}
bool AudioStreamPlayer2D::get_stream_paused() const {
return stream_paused;
if (stream_playback.is_valid()) {
return AudioServer::get_singleton()->is_playback_paused(stream_playback);
}
return false;
}
Ref<AudioStreamPlayback> AudioStreamPlayer2D::get_stream_playback() {

View file

@ -51,38 +51,30 @@ private:
Viewport *viewport = nullptr; //pointer only used for reference to previous mix
};
Output outputs[MAX_OUTPUTS];
SafeNumeric<int> output_count;
SafeFlag output_ready;
//these are used by audio thread to have a reference of previous volumes (for ramping volume and avoiding clicks)
Output prev_outputs[MAX_OUTPUTS];
int prev_output_count = 0;
Ref<AudioStreamPlayback> stream_playback;
Ref<AudioStream> stream;
Vector<AudioFrame> mix_buffer;
SafeNumeric<float> setseek{ -1.0 };
SafeFlag active;
SafeNumeric<float> setplay{ -1.0 };
Vector<AudioFrame> volume_vector;
uint64_t last_mix_count = -1;
float volume_db = 0.0;
float pitch_scale = 1.0;
bool autoplay = false;
bool stream_paused = false;
bool stream_paused_fade_in = false;
bool stream_paused_fade_out = false;
StringName bus;
void _mix_audio();
static void _mix_audios(void *self) { reinterpret_cast<AudioStreamPlayer2D *>(self)->_mix_audio(); }
StringName default_bus = "Master";
void _set_playing(bool p_enable);
bool _is_active() const;
StringName _get_actual_bus();
void _update_panning();
void _bus_layout_changed();
static void _listener_changed_cb(void *self) { reinterpret_cast<AudioStreamPlayer2D *>(self)->_update_panning(); }
uint32_t area_mask = 1;
float max_distance = 2000.0;

View file

@ -95,7 +95,7 @@ static const Vector3 speaker_directions[7] = {
Vector3(1.0, 0.0, 0.0).normalized(), // side-right
};
void AudioStreamPlayer3D::_calc_output_vol(const Vector3 &source_dir, real_t tightness, AudioStreamPlayer3D::Output &output) {
void AudioStreamPlayer3D::_calc_output_vol(const Vector3 &source_dir, real_t tightness, Vector<AudioFrame> &output) {
unsigned int speaker_count = 0; // only main speakers (no LFE)
switch (AudioServer::get_singleton()->get_speaker_mode()) {
case AudioServer::SPEAKER_MODE_STEREO:
@ -118,182 +118,94 @@ void AudioStreamPlayer3D::_calc_output_vol(const Vector3 &source_dir, real_t tig
switch (AudioServer::get_singleton()->get_speaker_mode()) {
case AudioServer::SPEAKER_SURROUND_71:
output.vol[3].l = volumes[5]; // side-left
output.vol[3].r = volumes[6]; // side-right
output.write[3].l = volumes[5]; // side-left
output.write[3].r = volumes[6]; // side-right
[[fallthrough]];
case AudioServer::SPEAKER_SURROUND_51:
output.vol[2].l = volumes[3]; // rear-left
output.vol[2].r = volumes[4]; // rear-right
output.write[2].l = volumes[3]; // rear-left
output.write[2].r = volumes[4]; // rear-right
[[fallthrough]];
case AudioServer::SPEAKER_SURROUND_31:
output.vol[1].r = 1.0; // LFE - always full power
output.vol[1].l = volumes[2]; // center
output.write[1].r = 1.0; // LFE - always full power
output.write[1].l = volumes[2]; // center
[[fallthrough]];
case AudioServer::SPEAKER_MODE_STEREO:
output.vol[0].r = volumes[1]; // front-right
output.vol[0].l = volumes[0]; // front-left
output.write[0].r = volumes[1]; // front-right
output.write[0].l = volumes[0]; // front-left
break;
}
}
void AudioStreamPlayer3D::_mix_audio() {
if (!stream_playback.is_valid() || !active.is_set() ||
(stream_paused && !stream_paused_fade_out)) {
return;
}
void AudioStreamPlayer3D::_calc_reverb_vol(Area3D *area, Vector3 listener_area_pos, Vector<AudioFrame> direct_path_vol, Vector<AudioFrame> &reverb_vol) {
reverb_vol.resize(4);
reverb_vol.write[0] = AudioFrame(0, 0);
reverb_vol.write[1] = AudioFrame(0, 0);
reverb_vol.write[2] = AudioFrame(0, 0);
reverb_vol.write[3] = AudioFrame(0, 0);
bool started = false;
if (setseek.get() >= 0.0) {
stream_playback->start(setseek.get());
setseek.set(-1.0); //reset seek
started = true;
}
float uniformity = area->get_reverb_uniformity();
float area_send = area->get_reverb_amount();
//get data
AudioFrame *buffer = mix_buffer.ptrw();
int buffer_size = mix_buffer.size();
if (uniformity > 0.0) {
float distance = listener_area_pos.length();
float attenuation = Math::db2linear(_get_attenuation_db(distance));
if (stream_paused_fade_out) {
// Short fadeout ramp
buffer_size = MIN(buffer_size, 128);
}
// Determine the fraction of sound that would come from each speaker if they were all driven uniformly.
float center_val[3] = { 0.5f, 0.25f, 0.16666f };
int channel_count = AudioServer::get_singleton()->get_channel_count();
AudioFrame center_frame(center_val[channel_count - 1], center_val[channel_count - 1]);
// Mix if we're not paused or we're fading out
if ((output_count.get() > 0 || out_of_range_mode == OUT_OF_RANGE_MIX)) {
float output_pitch_scale = 0.0;
if (output_count.get()) {
//used for doppler, not realistic but good enough
for (int i = 0; i < output_count.get(); i++) {
output_pitch_scale += outputs[i].pitch_scale;
if (attenuation < 1.0) {
//pan the uniform sound
Vector3 rev_pos = listener_area_pos;
rev_pos.y = 0;
rev_pos.normalize();
if (channel_count >= 1) {
// Stereo pair
float c = rev_pos.x * 0.5 + 0.5;
reverb_vol.write[0].l = 1.0 - c;
reverb_vol.write[0].r = c;
}
if (channel_count >= 3) {
// Center pair + Side pair
float xl = Vector3(-1, 0, -1).normalized().dot(rev_pos) * 0.5 + 0.5;
float xr = Vector3(1, 0, -1).normalized().dot(rev_pos) * 0.5 + 0.5;
reverb_vol.write[1].l = xl;
reverb_vol.write[1].r = xr;
reverb_vol.write[2].l = 1.0 - xr;
reverb_vol.write[2].r = 1.0 - xl;
}
if (channel_count >= 4) {
// Rear pair
// FIXME: Not sure what math should be done here
float c = rev_pos.x * 0.5 + 0.5;
reverb_vol.write[3].l = 1.0 - c;
reverb_vol.write[3].r = c;
}
for (int i = 0; i < channel_count; i++) {
reverb_vol.write[i] = reverb_vol[i].lerp(center_frame, attenuation);
}
output_pitch_scale /= float(output_count.get());
} else {
output_pitch_scale = 1.0;
for (int i = 0; i < channel_count; i++) {
reverb_vol.write[i] = center_frame;
}
}
stream_playback->mix(buffer, pitch_scale * output_pitch_scale, buffer_size);
for (int i = 0; i < channel_count; i++) {
reverb_vol.write[i] = direct_path_vol[i].lerp(reverb_vol[i] * attenuation, uniformity);
reverb_vol.write[i] *= area_send;
}
} else {
for (int i = 0; i < 4; i++) {
reverb_vol.write[i] = direct_path_vol[i] * area_send;
}
}
//write all outputs
for (int i = 0; i < output_count.get(); i++) {
Output current = outputs[i];
//see if current output exists, to keep volume ramp
bool found = false;
for (int j = i; j < prev_output_count; j++) {
if (prev_outputs[j].viewport == current.viewport) {
if (j != i) {
SWAP(prev_outputs[j], prev_outputs[i]);
}
found = true;
break;
}
}
bool interpolate_filter = !started;
if (!found) {
//create new if was not used before
if (prev_output_count < MAX_OUTPUTS) {
prev_outputs[prev_output_count] = prev_outputs[i]; //may be owned by another viewport
prev_output_count++;
}
prev_outputs[i] = current;
interpolate_filter = false;
}
//mix!
int buffers = AudioServer::get_singleton()->get_channel_count();
for (int k = 0; k < buffers; k++) {
AudioFrame target_volume = stream_paused_fade_out ? AudioFrame(0.f, 0.f) : current.vol[k];
AudioFrame vol_prev = stream_paused_fade_in ? AudioFrame(0.f, 0.f) : prev_outputs[i].vol[k];
AudioFrame vol_inc = (target_volume - vol_prev) / float(buffer_size);
AudioFrame vol = vol_prev;
if (!AudioServer::get_singleton()->thread_has_channel_mix_buffer(current.bus_index, k)) {
continue; //may have been deleted, will be updated on process
}
AudioFrame *target = AudioServer::get_singleton()->thread_get_channel_mix_buffer(current.bus_index, k);
current.filter.set_mode(AudioFilterSW::HIGHSHELF);
current.filter.set_sampling_rate(AudioServer::get_singleton()->get_mix_rate());
current.filter.set_cutoff(attenuation_filter_cutoff_hz);
current.filter.set_resonance(1);
current.filter.set_stages(1);
current.filter.set_gain(current.filter_gain);
if (interpolate_filter) {
current.filter_process[k * 2 + 0] = prev_outputs[i].filter_process[k * 2 + 0];
current.filter_process[k * 2 + 1] = prev_outputs[i].filter_process[k * 2 + 1];
current.filter_process[k * 2 + 0].set_filter(&current.filter, false);
current.filter_process[k * 2 + 1].set_filter(&current.filter, false);
current.filter_process[k * 2 + 0].update_coeffs(buffer_size);
current.filter_process[k * 2 + 1].update_coeffs(buffer_size);
for (int j = 0; j < buffer_size; j++) {
AudioFrame f = buffer[j] * vol;
current.filter_process[k * 2 + 0].process_one_interp(f.l);
current.filter_process[k * 2 + 1].process_one_interp(f.r);
target[j] += f;
vol += vol_inc;
}
} else {
current.filter_process[k * 2 + 0].set_filter(&current.filter);
current.filter_process[k * 2 + 1].set_filter(&current.filter);
current.filter_process[k * 2 + 0].update_coeffs();
current.filter_process[k * 2 + 1].update_coeffs();
for (int j = 0; j < buffer_size; j++) {
AudioFrame f = buffer[j] * vol;
current.filter_process[k * 2 + 0].process_one(f.l);
current.filter_process[k * 2 + 1].process_one(f.r);
target[j] += f;
vol += vol_inc;
}
}
if (current.reverb_bus_index >= 0) {
if (!AudioServer::get_singleton()->thread_has_channel_mix_buffer(current.reverb_bus_index, k)) {
continue; //may have been deleted, will be updated on process
}
AudioFrame *rtarget = AudioServer::get_singleton()->thread_get_channel_mix_buffer(current.reverb_bus_index, k);
if (current.reverb_bus_index == prev_outputs[i].reverb_bus_index) {
AudioFrame rvol_inc = (current.reverb_vol[k] - prev_outputs[i].reverb_vol[k]) / float(buffer_size);
AudioFrame rvol = prev_outputs[i].reverb_vol[k];
for (int j = 0; j < buffer_size; j++) {
rtarget[j] += buffer[j] * rvol;
rvol += rvol_inc;
}
} else {
AudioFrame rvol = current.reverb_vol[k];
for (int j = 0; j < buffer_size; j++) {
rtarget[j] += buffer[j] * rvol;
}
}
}
}
prev_outputs[i] = current;
}
prev_output_count = output_count.get();
//stream is no longer active, disable this.
if (!stream_playback->is_playing()) {
active.clear();
}
output_ready.clear();
stream_paused_fade_in = false;
stream_paused_fade_out = false;
}
float AudioStreamPlayer3D::_get_attenuation_db(float p_distance) const {
@ -329,14 +241,15 @@ float AudioStreamPlayer3D::_get_attenuation_db(float p_distance) const {
void AudioStreamPlayer3D::_notification(int p_what) {
if (p_what == NOTIFICATION_ENTER_TREE) {
velocity_tracker->reset(get_global_transform().origin);
AudioServer::get_singleton()->add_callback(_mix_audios, this);
AudioServer::get_singleton()->add_listener_changed_callback(_listener_changed_cb, this);
if (autoplay && !Engine::get_singleton()->is_editor_hint()) {
play();
}
}
if (p_what == NOTIFICATION_EXIT_TREE) {
AudioServer::get_singleton()->remove_callback(_mix_audios, this);
stop();
AudioServer::get_singleton()->remove_listener_changed_callback(_listener_changed_cb, this);
}
if (p_what == NOTIFICATION_PAUSED) {
@ -359,251 +272,24 @@ void AudioStreamPlayer3D::_notification(int p_what) {
if (p_what == NOTIFICATION_INTERNAL_PHYSICS_PROCESS) {
//update anything related to position first, if possible of course
if (!output_ready.is_set()) {
Vector3 linear_velocity;
//compute linear velocity for doppler
if (doppler_tracking != DOPPLER_TRACKING_DISABLED) {
linear_velocity = velocity_tracker->get_tracked_linear_velocity();
}
Ref<World3D> world_3d = get_world_3d();
ERR_FAIL_COND(world_3d.is_null());
int new_output_count = 0;
Vector3 global_pos = get_global_transform().origin;
int bus_index = AudioServer::get_singleton()->thread_find_bus_index(bus);
//check if any area is diverting sound into a bus
PhysicsDirectSpaceState3D *space_state = PhysicsServer3D::get_singleton()->space_get_direct_state(world_3d->get_space());
PhysicsDirectSpaceState3D::ShapeResult sr[MAX_INTERSECT_AREAS];
int areas = space_state->intersect_point(global_pos, sr, MAX_INTERSECT_AREAS, Set<RID>(), area_mask, false, true);
Area3D *area = nullptr;
for (int i = 0; i < areas; i++) {
if (!sr[i].collider) {
continue;
}
Area3D *tarea = Object::cast_to<Area3D>(sr[i].collider);
if (!tarea) {
continue;
}
if (!tarea->is_overriding_audio_bus() && !tarea->is_using_reverb_bus()) {
continue;
}
area = tarea;
break;
}
for (const Set<Camera3D *>::Element *E = world_3d->get_cameras().front(); E; E = E->next()) {
Camera3D *camera = E->get();
Viewport *vp = camera->get_viewport();
if (!vp->is_audio_listener_3d()) {
continue;
}
bool listener_is_camera = true;
Node3D *listener_node = camera;
Listener3D *listener = vp->get_listener_3d();
if (listener) {
listener_node = listener;
listener_is_camera = false;
}
Vector3 local_pos = listener_node->get_global_transform().orthonormalized().affine_inverse().xform(global_pos);
float dist = local_pos.length();
Vector3 area_sound_pos;
Vector3 listener_area_pos;
if (area && area->is_using_reverb_bus() && area->get_reverb_uniformity() > 0) {
area_sound_pos = space_state->get_closest_point_to_object_volume(area->get_rid(), listener_node->get_global_transform().origin);
listener_area_pos = listener_node->to_local(area_sound_pos);
}
if (max_distance > 0) {
float total_max = max_distance;
if (area && area->is_using_reverb_bus() && area->get_reverb_uniformity() > 0) {
total_max = MAX(total_max, listener_area_pos.length());
}
if (total_max > max_distance) {
continue; //can't hear this sound in this listener
}
}
float multiplier = Math::db2linear(_get_attenuation_db(dist));
if (max_distance > 0) {
multiplier *= MAX(0, 1.0 - (dist / max_distance));
}
Output output;
output.bus_index = bus_index;
output.reverb_bus_index = -1; //no reverb by default
output.viewport = vp;
float db_att = (1.0 - MIN(1.0, multiplier)) * attenuation_filter_db;
if (emission_angle_enabled) {
Vector3 listenertopos = global_pos - listener_node->get_global_transform().origin;
float c = listenertopos.normalized().dot(get_global_transform().basis.get_axis(2).normalized()); //it's z negative
float angle = Math::rad2deg(Math::acos(c));
if (angle > emission_angle) {
db_att -= -emission_angle_filter_attenuation_db;
}
}
output.filter_gain = Math::db2linear(db_att);
//TODO: The lower the second parameter (tightness) the more the sound will "enclose" the listener (more undirected / playing from
// speakers not facing the source) - this could be made distance dependent.
_calc_output_vol(local_pos.normalized(), 4.0, output);
unsigned int cc = AudioServer::get_singleton()->get_channel_count();
for (unsigned int k = 0; k < cc; k++) {
output.vol[k] *= multiplier;
}
bool filled_reverb = false;
int vol_index_max = AudioServer::get_singleton()->get_speaker_mode() + 1;
if (area) {
if (area->is_overriding_audio_bus()) {
//override audio bus
StringName bus_name = area->get_audio_bus_name();
output.bus_index = AudioServer::get_singleton()->thread_find_bus_index(bus_name);
}
if (area->is_using_reverb_bus()) {
filled_reverb = true;
StringName bus_name = area->get_reverb_bus();
output.reverb_bus_index = AudioServer::get_singleton()->thread_find_bus_index(bus_name);
float uniformity = area->get_reverb_uniformity();
float area_send = area->get_reverb_amount();
if (uniformity > 0.0) {
float distance = listener_area_pos.length();
float attenuation = Math::db2linear(_get_attenuation_db(distance));
//float dist_att_db = -20 * Math::log(dist + 0.00001); //logarithmic attenuation, like in real life
float center_val[3] = { 0.5f, 0.25f, 0.16666f };
AudioFrame center_frame(center_val[vol_index_max - 1], center_val[vol_index_max - 1]);
if (attenuation < 1.0) {
//pan the uniform sound
Vector3 rev_pos = listener_area_pos;
rev_pos.y = 0;
rev_pos.normalize();
if (cc >= 1) {
// Stereo pair
float c = rev_pos.x * 0.5 + 0.5;
output.reverb_vol[0].l = 1.0 - c;
output.reverb_vol[0].r = c;
}
if (cc >= 3) {
// Center pair + Side pair
float xl = Vector3(-1, 0, -1).normalized().dot(rev_pos) * 0.5 + 0.5;
float xr = Vector3(1, 0, -1).normalized().dot(rev_pos) * 0.5 + 0.5;
output.reverb_vol[1].l = xl;
output.reverb_vol[1].r = xr;
output.reverb_vol[2].l = 1.0 - xr;
output.reverb_vol[2].r = 1.0 - xl;
}
if (cc >= 4) {
// Rear pair
// FIXME: Not sure what math should be done here
float c = rev_pos.x * 0.5 + 0.5;
output.reverb_vol[3].l = 1.0 - c;
output.reverb_vol[3].r = c;
}
for (int i = 0; i < vol_index_max; i++) {
output.reverb_vol[i] = output.reverb_vol[i].lerp(center_frame, attenuation);
}
} else {
for (int i = 0; i < vol_index_max; i++) {
output.reverb_vol[i] = center_frame;
}
}
for (int i = 0; i < vol_index_max; i++) {
output.reverb_vol[i] = output.vol[i].lerp(output.reverb_vol[i] * attenuation, uniformity);
output.reverb_vol[i] *= area_send;
}
} else {
for (int i = 0; i < vol_index_max; i++) {
output.reverb_vol[i] = output.vol[i] * area_send;
}
}
}
}
if (doppler_tracking != DOPPLER_TRACKING_DISABLED) {
Vector3 listener_velocity;
if (listener_is_camera) {
listener_velocity = camera->get_doppler_tracked_velocity();
}
Vector3 local_velocity = listener_node->get_global_transform().orthonormalized().basis.xform_inv(linear_velocity - listener_velocity);
if (local_velocity == Vector3()) {
output.pitch_scale = 1.0;
} else {
float approaching = local_pos.normalized().dot(local_velocity.normalized());
float velocity = local_velocity.length();
float speed_of_sound = 343.0;
output.pitch_scale = speed_of_sound / (speed_of_sound + velocity * approaching);
output.pitch_scale = CLAMP(output.pitch_scale, (1 / 8.0), 8.0); //avoid crazy stuff
}
} else {
output.pitch_scale = 1.0;
}
if (!filled_reverb) {
for (int i = 0; i < vol_index_max; i++) {
output.reverb_vol[i] = AudioFrame(0, 0);
}
}
outputs[new_output_count] = output;
new_output_count++;
if (new_output_count == MAX_OUTPUTS) {
break;
}
}
output_count.set(new_output_count);
output_ready.set();
if (!stream_playback.is_valid()) {
return;
}
//start playing if requested
if (setplay.get() >= 0.0) {
setseek.set(setplay.get());
if (setplay.get() >= 0) {
Vector<AudioFrame> volume_vector = _update_panning();
AudioServer::get_singleton()->start_playback_stream(stream_playback, _get_actual_bus(), volume_vector, setplay.get());
active.set();
setplay.set(-1);
}
//stop playing if no longer active
if (active.is_set() && last_mix_count != AudioServer::get_singleton()->get_mix_count()) {
_update_panning();
last_mix_count = AudioServer::get_singleton()->get_mix_count();
}
// Stop playing if no longer active.
if (!active.is_set()) {
set_physics_process_internal(false);
emit_signal(SNAME("finished"));
@ -611,16 +297,191 @@ void AudioStreamPlayer3D::_notification(int p_what) {
}
}
Area3D *AudioStreamPlayer3D::_get_overriding_area() {
	// Find an Area3D overlapping this player's position that either overrides
	// the audio bus or routes sound into a reverb bus. Returns nullptr when no
	// such area exists (or the world is unavailable).
	Ref<World3D> world_3d = get_world_3d();
	ERR_FAIL_COND_V(world_3d.is_null(), nullptr);

	Vector3 global_pos = get_global_transform().origin;

	PhysicsDirectSpaceState3D *space_state = PhysicsServer3D::get_singleton()->space_get_direct_state(world_3d->get_space());
	PhysicsDirectSpaceState3D::ShapeResult results[MAX_INTERSECT_AREAS];

	int result_count = space_state->intersect_point(global_pos, results, MAX_INTERSECT_AREAS, Set<RID>(), area_mask, false, true);

	for (int i = 0; i < result_count; i++) {
		if (!results[i].collider) {
			continue;
		}
		Area3D *candidate = Object::cast_to<Area3D>(results[i].collider);
		// Only areas that actually divert audio are interesting.
		if (candidate && (candidate->is_overriding_audio_bus() || candidate->is_using_reverb_bus())) {
			return candidate;
		}
	}
	return nullptr;
}
StringName AudioStreamPlayer3D::_get_actual_bus() {
	// Resolve the bus this player's sound should actually be routed to.
	// Without a valid playback there is nothing to route; fall back to Master.
	if (stream_playback.is_null()) {
		return SNAME("Master");
	}
	// An overlapping area that overrides the bus — and does not route through a
	// reverb bus — takes precedence over the configured bus.
	Area3D *area = _get_overriding_area();
	if (area == nullptr || !area->is_overriding_audio_bus() || area->is_using_reverb_bus()) {
		return bus;
	}
	return area->get_audio_bus_name();
}
Vector<AudioFrame> AudioStreamPlayer3D::_update_panning() {
	// Recomputes panning, distance attenuation, reverb sends and doppler pitch
	// for every active 3D audio listener, pushing the results to the
	// AudioServer for this node's playback. Returns the direct-path volume
	// vector (four stereo pairs); zeroed when playback or world is invalid.
	Vector<AudioFrame> output_volume_vector;
	output_volume_vector.resize(4);
	for (AudioFrame &frame : output_volume_vector) {
		frame = AudioFrame(0, 0);
	}

	ERR_FAIL_COND_V(stream_playback.is_null(), output_volume_vector);

	Vector3 linear_velocity;

	//compute linear velocity for doppler
	if (doppler_tracking != DOPPLER_TRACKING_DISABLED) {
		linear_velocity = velocity_tracker->get_tracked_linear_velocity();
	}

	Vector3 global_pos = get_global_transform().origin;

	Ref<World3D> world_3d = get_world_3d();
	ERR_FAIL_COND_V(world_3d.is_null(), output_volume_vector);

	// Consider every camera in the world, plus this node's own viewport camera.
	Set<Camera3D *> cameras = world_3d->get_cameras();
	cameras.insert(get_viewport()->get_camera_3d());

	PhysicsDirectSpaceState3D *space_state = PhysicsServer3D::get_singleton()->space_get_direct_state(world_3d->get_space());

	for (Camera3D *camera : cameras) {
		Viewport *vp = camera->get_viewport();
		if (!vp->is_audio_listener_3d()) {
			continue;
		}

		// A Listener3D attached to the viewport takes precedence over the camera.
		bool listener_is_camera = true;
		Node3D *listener_node = camera;

		Listener3D *listener = vp->get_listener_3d();
		if (listener) {
			listener_node = listener;
			listener_is_camera = false;
		}

		// Sound position expressed in the listener's local space.
		Vector3 local_pos = listener_node->get_global_transform().orthonormalized().affine_inverse().xform(global_pos);

		float dist = local_pos.length();

		Vector3 area_sound_pos;
		Vector3 listener_area_pos;

		Area3D *area = _get_overriding_area();

		if (area && area->is_using_reverb_bus() && area->get_reverb_uniformity() > 0) {
			// Closest point on the reverb area's volume, in listener space.
			area_sound_pos = space_state->get_closest_point_to_object_volume(area->get_rid(), listener_node->get_global_transform().origin);
			listener_area_pos = listener_node->get_global_transform().affine_inverse().xform(area_sound_pos);
		}

		if (max_distance > 0) {
			float total_max = max_distance;

			if (area && area->is_using_reverb_bus() && area->get_reverb_uniformity() > 0) {
				total_max = MAX(total_max, listener_area_pos.length());
			}
			if (total_max > max_distance) {
				continue; //can't hear this sound in this listener
			}
		}

		float multiplier = Math::db2linear(_get_attenuation_db(dist));
		if (max_distance > 0) {
			multiplier *= MAX(0, 1.0 - (dist / max_distance));
		}

		// High-shelf attenuation: the more attenuated the sound, the more its
		// high frequencies get filtered out.
		float db_att = (1.0 - MIN(1.0, multiplier)) * attenuation_filter_db;

		if (emission_angle_enabled) {
			Vector3 listenertopos = global_pos - listener_node->get_global_transform().origin;
			float c = listenertopos.normalized().dot(get_global_transform().basis.get_axis(2).normalized()); //it's z negative
			float angle = Math::rad2deg(Math::acos(c));
			if (angle > emission_angle) {
				// Listener is outside the emission cone; apply extra filtering.
				db_att -= -emission_angle_filter_attenuation_db;
			}
		}

		AudioServer::get_singleton()->set_playback_highshelf_params(stream_playback, Math::db2linear(db_att), attenuation_filter_cutoff_hz);
		//TODO: The lower the second parameter (tightness) the more the sound will "enclose" the listener (more undirected / playing from
		//      speakers not facing the source) - this could be made distance dependent.
		_calc_output_vol(local_pos.normalized(), 4.0, output_volume_vector);

		for (unsigned int k = 0; k < 4; k++) {
			output_volume_vector.write[k] = multiplier * output_volume_vector[k];
		}

		// Route volumes per bus: area override and/or reverb send, else the
		// configured bus.
		Map<StringName, Vector<AudioFrame>> bus_volumes;
		if (area) {
			if (area->is_overriding_audio_bus()) {
				//override audio bus
				bus_volumes[area->get_audio_bus_name()] = output_volume_vector;
			}

			if (area->is_using_reverb_bus()) {
				StringName reverb_bus_name = area->get_reverb_bus();
				Vector<AudioFrame> reverb_vol;
				_calc_reverb_vol(area, listener_area_pos, output_volume_vector, reverb_vol);
				bus_volumes[reverb_bus_name] = reverb_vol;
			}
		} else {
			bus_volumes[bus] = output_volume_vector;
		}
		AudioServer::get_singleton()->set_playback_bus_volumes_linear(stream_playback, bus_volumes);

		if (doppler_tracking != DOPPLER_TRACKING_DISABLED) {
			Vector3 listener_velocity;

			if (listener_is_camera) {
				listener_velocity = camera->get_doppler_tracked_velocity();
			}

			Vector3 local_velocity = listener_node->get_global_transform().orthonormalized().basis.xform_inv(linear_velocity - listener_velocity);

			if (local_velocity == Vector3()) {
				AudioServer::get_singleton()->set_playback_pitch_scale(stream_playback, pitch_scale);
			} else {
				float approaching = local_pos.normalized().dot(local_velocity.normalized());
				float velocity = local_velocity.length();
				float speed_of_sound = 343.0;

				float doppler_pitch_scale = pitch_scale * speed_of_sound / (speed_of_sound + velocity * approaching);
				doppler_pitch_scale = CLAMP(doppler_pitch_scale, (1 / 8.0), 8.0); //avoid crazy stuff

				AudioServer::get_singleton()->set_playback_pitch_scale(stream_playback, doppler_pitch_scale);
			}
		} else {
			AudioServer::get_singleton()->set_playback_pitch_scale(stream_playback, pitch_scale);
		}
	}
	return output_volume_vector;
}
void AudioStreamPlayer3D::set_stream(Ref<AudioStream> p_stream) {
AudioServer::get_singleton()->lock();
mix_buffer.resize(AudioServer::get_singleton()->thread_get_mix_buffer_size());
if (stream_playback.is_valid()) {
stop();
stream_playback.unref();
stream.unref();
active.clear();
setseek.set(-1);
}
if (p_stream.is_valid()) {
@ -632,8 +493,6 @@ void AudioStreamPlayer3D::set_stream(Ref<AudioStream> p_stream) {
}
}
AudioServer::get_singleton()->unlock();
if (p_stream.is_valid() && stream_playback.is_null()) {
stream.unref();
}
@ -677,27 +536,22 @@ float AudioStreamPlayer3D::get_pitch_scale() const {
}
void AudioStreamPlayer3D::play(float p_from_pos) {
	// Requests playback starting at p_from_pos (seconds). The start is
	// deferred: setplay is consumed later on the internal physics process.
	if (!is_playing()) {
		// Reset the prev_output_count if the stream is stopped
		prev_output_count = 0;
	}

	if (stream_playback.is_valid()) {
		setplay.set(p_from_pos);
		output_ready.clear(); // Force panning/volumes to be recomputed first.
		set_physics_process_internal(true);
	}
}
void AudioStreamPlayer3D::seek(float p_seconds) {
if (stream_playback.is_valid()) {
setseek.set(p_seconds);
if (stream_playback.is_valid() && active.is_set()) {
play(p_seconds);
}
}
void AudioStreamPlayer3D::stop() {
if (stream_playback.is_valid()) {
active.clear();
AudioServer::get_singleton()->stop_playback_stream(stream_playback);
set_physics_process_internal(false);
setplay.set(-1);
}
@ -713,11 +567,7 @@ bool AudioStreamPlayer3D::is_playing() const {
float AudioStreamPlayer3D::get_playback_position() {
if (stream_playback.is_valid()) {
float ss = setseek.get();
if (ss >= 0.0) {
return ss;
}
return stream_playback->get_playback_position();
return AudioServer::get_singleton()->get_playback_position(stream_playback);
}
return 0;
@ -736,7 +586,7 @@ StringName AudioStreamPlayer3D::get_bus() const {
return bus;
}
}
return "Master";
return SNAME("Master");
}
void AudioStreamPlayer3D::set_autoplay(bool p_enable) {
@ -879,15 +729,16 @@ AudioStreamPlayer3D::DopplerTracking AudioStreamPlayer3D::get_doppler_tracking()
}
void AudioStreamPlayer3D::set_stream_paused(bool p_pause) {
if (p_pause != stream_paused) {
stream_paused = p_pause;
stream_paused_fade_in = !stream_paused;
stream_paused_fade_out = stream_paused;
if (stream_playback.is_valid()) {
AudioServer::get_singleton()->set_playback_paused(stream_playback, p_pause);
}
}
bool AudioStreamPlayer3D::get_stream_paused() const {
	// Reports whether the stream is paused.
	return stream_paused;
	// NOTE(review): everything below is unreachable, shadowed by the return
	// above — confirm whether the AudioServer-side pause state should instead
	// be the source of truth.
	if (stream_playback.is_valid()) {
		return AudioServer::get_singleton()->is_playback_paused(stream_playback);
	}
	return false;
}
Ref<AudioStreamPlayback> AudioStreamPlayer3D::get_stream_playback() {

View file

@ -31,10 +31,12 @@
#ifndef AUDIO_STREAM_PLAYER_3D_H
#define AUDIO_STREAM_PLAYER_3D_H
#include "scene/3d/area_3d.h"
#include "scene/3d/node_3d.h"
#include "scene/3d/velocity_tracker_3d.h"
#include "servers/audio/audio_filter_sw.h"
#include "servers/audio/audio_stream.h"
#include "servers/audio_server.h"
class Camera3D;
class AudioStreamPlayer3D : public Node3D {
@ -66,31 +68,9 @@ private:
};
struct Output {
	// Per-listener mixing state for one viewport.
	AudioFilterSW filter; // High-shelf attenuation filter settings.
	AudioFilterSW::Processor filter_process[8]; // Two processors (L/R) per channel pair, up to four pairs.
	AudioFrame vol[4]; // Direct-path volume, one AudioFrame per stereo channel pair.
	float filter_gain = 0.0;
	float pitch_scale = 0.0; // Doppler-adjusted pitch multiplier.
	int bus_index = -1; // Target bus; -1 when unresolved.
	int reverb_bus_index = -1; // -1 means no reverb send.
	AudioFrame reverb_vol[4]; // Reverb send volume per channel pair.
	Viewport *viewport = nullptr; //pointer only used for reference to previous mix
};
Output outputs[MAX_OUTPUTS];
SafeNumeric<int> output_count;
SafeFlag output_ready;
//these are used by audio thread to have a reference of previous volumes (for ramping volume and avoiding clicks)
Output prev_outputs[MAX_OUTPUTS];
int prev_output_count = 0;
Ref<AudioStreamPlayback> stream_playback;
Ref<AudioStream> stream;
Vector<AudioFrame> mix_buffer;
SafeNumeric<float> setseek{ -1.0 };
SafeFlag active;
SafeNumeric<float> setplay{ -1.0 };
@ -100,17 +80,21 @@ private:
float max_db = 3.0;
float pitch_scale = 1.0;
bool autoplay = false;
bool stream_paused = false;
bool stream_paused_fade_in = false;
bool stream_paused_fade_out = false;
StringName bus;
StringName bus = "Master";
static void _calc_output_vol(const Vector3 &source_dir, real_t tightness, Output &output);
void _mix_audio();
static void _mix_audios(void *self) { reinterpret_cast<AudioStreamPlayer3D *>(self)->_mix_audio(); }
uint64_t last_mix_count = -1;
static void _calc_output_vol(const Vector3 &source_dir, real_t tightness, Vector<AudioFrame> &output);
void _calc_reverb_vol(Area3D *area, Vector3 listener_area_pos, Vector<AudioFrame> direct_path_vol, Vector<AudioFrame> &reverb_vol);
static void _listener_changed_cb(void *self) { reinterpret_cast<AudioStreamPlayer3D *>(self)->_update_panning(); }
void _set_playing(bool p_enable);
bool _is_active() const;
StringName _get_actual_bus();
Area3D *_get_overriding_area();
Vector<AudioFrame> _update_panning();
void _bus_layout_changed();

View file

@ -31,119 +31,18 @@
#include "audio_stream_player.h"
#include "core/config/engine.h"
void AudioStreamPlayer::_mix_to_bus(const AudioFrame *p_frames, int p_amount) {
int bus_index = AudioServer::get_singleton()->thread_find_bus_index(bus);
AudioFrame *targets[4] = { nullptr, nullptr, nullptr, nullptr };
if (AudioServer::get_singleton()->get_speaker_mode() == AudioServer::SPEAKER_MODE_STEREO) {
targets[0] = AudioServer::get_singleton()->thread_get_channel_mix_buffer(bus_index, 0);
} else {
switch (mix_target) {
case MIX_TARGET_STEREO: {
targets[0] = AudioServer::get_singleton()->thread_get_channel_mix_buffer(bus_index, 0);
} break;
case MIX_TARGET_SURROUND: {
for (int i = 0; i < AudioServer::get_singleton()->get_channel_count(); i++) {
targets[i] = AudioServer::get_singleton()->thread_get_channel_mix_buffer(bus_index, i);
}
} break;
case MIX_TARGET_CENTER: {
targets[0] = AudioServer::get_singleton()->thread_get_channel_mix_buffer(bus_index, 1);
} break;
}
}
for (int c = 0; c < 4; c++) {
if (!targets[c]) {
break;
}
for (int i = 0; i < p_amount; i++) {
targets[c][i] += p_frames[i];
}
}
}
void AudioStreamPlayer::_mix_internal(bool p_fadeout) {
	// Mixes one block from the stream playback into the bus, ramping the
	// volume from mix_volume_db towards the target to avoid clicks. When
	// p_fadeout is set, mixes a short block ramping down to silence.
	//get data
	AudioFrame *buffer = mix_buffer.ptrw();
	int buffer_size = mix_buffer.size();

	if (p_fadeout) {
		// Short fadeout ramp
		buffer_size = MIN(buffer_size, 128);
	}

	stream_playback->mix(buffer, pitch_scale, buffer_size);

	//multiply volume interpolating to avoid clicks if this changes
	float target_volume = p_fadeout ? -80.0 : volume_db; // -80 dB: effectively silent.
	float vol = Math::db2linear(mix_volume_db);
	float vol_inc = (Math::db2linear(target_volume) - vol) / float(buffer_size);

	for (int i = 0; i < buffer_size; i++) {
		buffer[i] *= vol;
		vol += vol_inc;
	}

	//set volume for next mix
	mix_volume_db = target_volume;

	_mix_to_bus(buffer, buffer_size);
}
void AudioStreamPlayer::_mix_audio() {
	// Runs on the audio thread. Orchestrates pending fadeouts (stream swap,
	// stop, seek, pause) and then mixes the current block into the bus.
	if (use_fadeout) {
		// Flush the fadeout buffer prepared when the stream was swapped out.
		_mix_to_bus(fadeout_buffer.ptr(), fadeout_buffer.size());
		use_fadeout = false;
	}

	if (!stream_playback.is_valid() || !active.is_set() ||
			(stream_paused && !stream_paused_fade)) {
		return;
	}

	if (stream_paused) {
		// Just entered pause: mix one short fading block, then stay silent.
		if (stream_paused_fade && stream_playback->is_playing()) {
			_mix_internal(true);
			stream_paused_fade = false;
		}
		return;
	}

	if (setstop.is_set()) {
		_mix_internal(true); // Fade out before stopping to avoid a pop.
		stream_playback->stop();
		setstop.clear();
	}

	if (setseek.get() >= 0.0 && !stop_has_priority.is_set()) {
		if (stream_playback->is_playing()) {
			//fade out to avoid pops
			_mix_internal(true);
		}
		stream_playback->start(setseek.get());
		setseek.set(-1.0); //reset seek
		mix_volume_db = volume_db; //reset ramp
	}

	stop_has_priority.clear();

	_mix_internal(false);
}
#include "core/math/audio_frame.h"
#include "servers/audio_server.h"
void AudioStreamPlayer::_notification(int p_what) {
if (p_what == NOTIFICATION_ENTER_TREE) {
AudioServer::get_singleton()->add_callback(_mix_audios, this);
if (autoplay && !Engine::get_singleton()->is_editor_hint()) {
play();
}
}
if (p_what == NOTIFICATION_INTERNAL_PROCESS) {
if (!active.is_set() || (setseek.get() < 0 && !stream_playback->is_playing())) {
if (stream_playback.is_valid() && active.is_set() && !AudioServer::get_singleton()->is_playback_active(stream_playback)) {
active.clear();
set_process_internal(false);
emit_signal(SNAME("finished"));
@ -151,7 +50,9 @@ void AudioStreamPlayer::_notification(int p_what) {
}
if (p_what == NOTIFICATION_EXIT_TREE) {
AudioServer::get_singleton()->remove_callback(_mix_audios, this);
if (stream_playback.is_valid()) {
AudioServer::get_singleton()->stop_playback_stream(stream_playback);
}
}
if (p_what == NOTIFICATION_PAUSED) {
@ -167,38 +68,10 @@ void AudioStreamPlayer::_notification(int p_what) {
}
void AudioStreamPlayer::set_stream(Ref<AudioStream> p_stream) {
AudioServer::get_singleton()->lock();
if (active.is_set() && stream_playback.is_valid() && !stream_paused) {
//changing streams out of the blue is not a great idea, but at least
//let's try to somehow avoid a click
AudioFrame *buffer = fadeout_buffer.ptrw();
int buffer_size = fadeout_buffer.size();
stream_playback->mix(buffer, pitch_scale, buffer_size);
//multiply volume interpolating to avoid clicks if this changes
float target_volume = -80.0;
float vol = Math::db2linear(mix_volume_db);
float vol_inc = (Math::db2linear(target_volume) - vol) / float(buffer_size);
for (int i = 0; i < buffer_size; i++) {
buffer[i] *= vol;
vol += vol_inc;
}
use_fadeout = true;
}
mix_buffer.resize(AudioServer::get_singleton()->thread_get_mix_buffer_size());
if (stream_playback.is_valid()) {
stop();
stream_playback.unref();
stream.unref();
active.clear();
setseek.set(-1);
setstop.clear();
}
if (p_stream.is_valid()) {
@ -210,8 +83,6 @@ void AudioStreamPlayer::set_stream(Ref<AudioStream> p_stream) {
}
}
AudioServer::get_singleton()->unlock();
if (p_stream.is_valid() && stream_playback.is_null()) {
stream.unref();
}
@ -223,6 +94,8 @@ Ref<AudioStream> AudioStreamPlayer::get_stream() const {
void AudioStreamPlayer::set_volume_db(float p_volume) {
	// Sets the playback volume in decibels and pushes the updated per-channel
	// volumes to the AudioServer for the live playback.
	volume_db = p_volume;
	// Only notify the server when a playback exists; this matches the guard
	// used by set_stream_paused() and avoids handing it a null playback.
	if (stream_playback.is_valid()) {
		AudioServer::get_singleton()->set_playback_all_bus_volumes_linear(stream_playback, _get_volume_vector());
	}
}
float AudioStreamPlayer::get_volume_db() const {
@ -232,6 +105,8 @@ float AudioStreamPlayer::get_volume_db() const {
void AudioStreamPlayer::set_pitch_scale(float p_pitch_scale) {
	// Sets the pitch multiplier (must be > 0) and forwards it to the
	// AudioServer for the live playback.
	ERR_FAIL_COND(p_pitch_scale <= 0.0);
	pitch_scale = p_pitch_scale;
	// Guard against a null playback, consistent with set_stream_paused().
	if (stream_playback.is_valid()) {
		AudioServer::get_singleton()->set_playback_pitch_scale(stream_playback, pitch_scale);
	}
}
float AudioStreamPlayer::get_pitch_scale() const {
@ -239,31 +114,32 @@ float AudioStreamPlayer::get_pitch_scale() const {
}
void AudioStreamPlayer::play(float p_from_pos) {
stop();
if (stream.is_valid()) {
stream_playback = stream->instance_playback();
}
if (stream_playback.is_valid()) {
//mix_volume_db = volume_db; do not reset volume ramp here, can cause clicks
setseek.set(p_from_pos);
stop_has_priority.clear();
AudioServer::get_singleton()->start_playback_stream(stream_playback, bus, _get_volume_vector(), p_from_pos);
active.set();
set_process_internal(true);
}
}
void AudioStreamPlayer::seek(float p_seconds) {
if (stream_playback.is_valid()) {
setseek.set(p_seconds);
if (stream_playback.is_valid() && active.is_set()) {
play(p_seconds);
}
}
void AudioStreamPlayer::stop() {
if (stream_playback.is_valid() && active.is_set()) {
setstop.set();
stop_has_priority.set();
if (stream_playback.is_valid()) {
active.clear();
AudioServer::get_singleton()->stop_playback_stream(stream_playback);
}
}
bool AudioStreamPlayer::is_playing() const {
if (stream_playback.is_valid()) {
return active.is_set() && !setstop.is_set(); //&& stream_playback->is_playing();
return AudioServer::get_singleton()->is_playback_active(stream_playback);
}
return false;
@ -271,26 +147,22 @@ bool AudioStreamPlayer::is_playing() const {
float AudioStreamPlayer::get_playback_position() {
	// Returns the current playback position in seconds, or 0 when inactive.
	if (stream_playback.is_valid()) {
		float ss = setseek.get();
		if (ss >= 0.0) {
			// A seek is pending; report the requested position.
			return ss;
		}
		return stream_playback->get_playback_position();
		// NOTE(review): unreachable — the AudioServer-based query below is dead
		// code shadowed by the return above; confirm which path is intended.
		return AudioServer::get_singleton()->get_playback_position(stream_playback);
	}
	return 0;
}
void AudioStreamPlayer::set_bus(const StringName &p_bus) {
	// Changes the target audio bus, taking the server lock so the audio
	// thread never observes a half-updated value.
	//if audio is active, must lock this
	AudioServer::get_singleton()->lock();
	bus = p_bus;
	AudioServer::get_singleton()->unlock();
	// Reroute the live playback to the new bus, keeping current volumes.
	if (stream_playback.is_valid()) {
		AudioServer::get_singleton()->set_playback_bus_exclusive(stream_playback, p_bus, _get_volume_vector());
	}
}
StringName AudioStreamPlayer::get_bus() const {
for (int i = 0; i < AudioServer::get_singleton()->get_bus_count(); i++) {
if (AudioServer::get_singleton()->get_bus_name(i) == bus) {
if (AudioServer::get_singleton()->get_bus_name(i) == String(bus)) {
return bus;
}
}
@ -322,18 +194,61 @@ void AudioStreamPlayer::_set_playing(bool p_enable) {
}
bool AudioStreamPlayer::_is_active() const {
	// Whether the player currently considers itself producing audio.
	return active.is_set();
	// NOTE(review): everything below is unreachable, shadowed by the return
	// above — confirm whether the AudioServer query should win instead.
	if (stream_playback.is_valid()) {
		return AudioServer::get_singleton()->is_playback_active(stream_playback);
	}
	return false;
}
void AudioStreamPlayer::set_stream_paused(bool p_pause) {
if (p_pause != stream_paused) {
stream_paused = p_pause;
stream_paused_fade = p_pause;
// TODO this does not have perfect recall, fix that maybe? If the stream isn't set, we can't persist this bool.
if (stream_playback.is_valid()) {
AudioServer::get_singleton()->set_playback_paused(stream_playback, p_pause);
}
}
bool AudioStreamPlayer::get_stream_paused() const {
	// Reports whether the stream is paused.
	return stream_paused;
	// NOTE(review): unreachable — shadowed by the return above; confirm
	// whether the AudioServer-side pause state should be the source of truth.
	if (stream_playback.is_valid()) {
		return AudioServer::get_singleton()->is_playback_paused(stream_playback);
	}
	return false;
}
Vector<AudioFrame> AudioStreamPlayer::_get_volume_vector() {
	// Build the per-channel-pair volume vector (up to four stereo pairs, i.e.
	// enough for 7.1) from volume_db, the speaker mode and the mix target.
	Vector<AudioFrame> volume_vector;
	volume_vector.resize(4);
	// Start from silence on every pair.
	for (int i = 0; i < 4; i++) {
		volume_vector.write[i] = AudioFrame(0, 0);
	}

	float volume_linear = Math::db2linear(volume_db);

	// TODO do we need to scale the volume down when we output to more channels?
	bool stereo_layout = AudioServer::get_singleton()->get_speaker_mode() == AudioServer::SPEAKER_MODE_STEREO;
	if (stereo_layout || mix_target == MIX_TARGET_STEREO) {
		// Stereo layouts (and the stereo mix target) only drive the first pair.
		volume_vector.write[0] = AudioFrame(volume_linear, volume_linear);
	} else if (mix_target == MIX_TARGET_SURROUND) {
		// TODO Make sure this is right.
		volume_vector.write[0] = AudioFrame(volume_linear, volume_linear);
		volume_vector.write[1] = AudioFrame(volume_linear, /* LFE= */ 1.0f);
		volume_vector.write[2] = AudioFrame(volume_linear, volume_linear);
		volume_vector.write[3] = AudioFrame(volume_linear, volume_linear);
	} else if (mix_target == MIX_TARGET_CENTER) {
		// TODO Make sure this is right.
		volume_vector.write[1] = AudioFrame(volume_linear, /* LFE= */ 1.0f);
	}
	return volume_vector;
}
void AudioStreamPlayer::_validate_property(PropertyInfo &property) const {
@ -410,8 +325,6 @@ void AudioStreamPlayer::_bind_methods() {
}
AudioStreamPlayer::AudioStreamPlayer() {
fadeout_buffer.resize(512);
AudioServer::get_singleton()->connect("bus_layout_changed", callable_mp(this, &AudioStreamPlayer::_bus_layout_changed));
}

View file

@ -48,22 +48,13 @@ public:
private:
Ref<AudioStreamPlayback> stream_playback;
Ref<AudioStream> stream;
Vector<AudioFrame> mix_buffer;
Vector<AudioFrame> fadeout_buffer;
bool use_fadeout = false;
SafeNumeric<float> setseek{ -1.0 };
SafeFlag active;
SafeFlag setstop;
SafeFlag stop_has_priority;
float mix_volume_db = 0.0;
float pitch_scale = 1.0;
float volume_db = 0.0;
bool autoplay = false;
bool stream_paused = false;
bool stream_paused_fade = false;
StringName bus;
StringName bus = "Master";
MixTarget mix_target = MIX_TARGET_STEREO;
@ -77,6 +68,8 @@ private:
void _bus_layout_changed();
void _mix_to_bus(const AudioFrame *p_frames, int p_amount);
Vector<AudioFrame> _get_volume_vector();
protected:
void _validate_property(PropertyInfo &property) const override;
void _notification(int p_what);

View file

@ -129,7 +129,7 @@ void VideoPlayer::_mix_audio() {
void VideoPlayer::_notification(int p_notification) {
switch (p_notification) {
case NOTIFICATION_ENTER_TREE: {
AudioServer::get_singleton()->add_callback(_mix_audios, this);
AudioServer::get_singleton()->add_mix_callback(_mix_audios, this);
if (stream.is_valid() && autoplay && !Engine::get_singleton()->is_editor_hint()) {
play();
@ -138,8 +138,7 @@ void VideoPlayer::_notification(int p_notification) {
} break;
case NOTIFICATION_EXIT_TREE: {
AudioServer::get_singleton()->remove_callback(_mix_audios, this);
AudioServer::get_singleton()->remove_mix_callback(_mix_audios, this);
} break;
case NOTIFICATION_INTERNAL_PROCESS: {

View file

@ -52,6 +52,7 @@
#include "scene/resources/text_line.h"
#include "scene/resources/world_2d.h"
#include "scene/scene_string_names.h"
#include "servers/audio_server.h"
void ViewportTexture::setup_local_to_scene() {
if (vp) {
@ -820,12 +821,7 @@ Rect2 Viewport::get_visible_rect() const {
}
void Viewport::_update_listener_2d() {
/*
if (is_inside_tree() && audio_listener_3d && (!get_parent() || (Object::cast_to<Control>(get_parent()) && Object::cast_to<Control>(get_parent())->is_visible_in_tree())))
SpatialSound2DServer::get_singleton()->listener_set_space(internal_listener_2d, find_world_2d()->get_sound_space());
else
SpatialSound2DServer::get_singleton()->listener_set_space(internal_listener_2d, RID());
*/
AudioServer::get_singleton()->notify_listener_changed();
}
void Viewport::set_as_audio_listener_2d(bool p_enable) {
@ -3072,6 +3068,7 @@ bool Viewport::is_audio_listener_3d() const {
}
void Viewport::_update_listener_3d() {
AudioServer::get_singleton()->notify_listener_changed();
}
void Viewport::_listener_transform_3d_changed_notify() {

View file

@ -221,12 +221,12 @@ void AudioStreamPlaybackSample::do_resample(const Depth *p_src, AudioFrame *p_ds
}
}
void AudioStreamPlaybackSample::mix(AudioFrame *p_buffer, float p_rate_scale, int p_frames) {
int AudioStreamPlaybackSample::mix(AudioFrame *p_buffer, float p_rate_scale, int p_frames) {
if (!base->data || !active) {
for (int i = 0; i < p_frames; i++) {
p_buffer[i] = AudioFrame(0, 0);
}
return;
return 0;
}
int len = base->data_bytes;
@ -395,12 +395,15 @@ void AudioStreamPlaybackSample::mix(AudioFrame *p_buffer, float p_rate_scale, in
}
if (todo) {
int mixed_frames = p_frames - todo;
//bit was missing from mix
int todo_ofs = p_frames - todo;
for (int i = todo_ofs; i < p_frames; i++) {
p_buffer[i] = AudioFrame(0, 0);
}
return mixed_frames;
}
return p_frames;
}
AudioStreamPlaybackSample::AudioStreamPlaybackSample() {}

View file

@ -73,7 +73,7 @@ public:
virtual float get_playback_position() const override;
virtual void seek(float p_time) override;
virtual void mix(AudioFrame *p_buffer, float p_rate_scale, int p_frames) override;
virtual int mix(AudioFrame *p_buffer, float p_rate_scale, int p_frames) override;
AudioStreamPlaybackSample();
};

View file

@ -74,11 +74,13 @@ void AudioStreamPlayback::seek(float p_time) {
}
}
void AudioStreamPlayback::mix(AudioFrame *p_buffer, float p_rate_scale, int p_frames) {
if (GDVIRTUAL_CALL(_mix, p_buffer, p_rate_scale, p_frames)) {
return;
int AudioStreamPlayback::mix(AudioFrame *p_buffer, float p_rate_scale, int p_frames) {
int ret;
if (GDVIRTUAL_CALL(_mix, p_buffer, p_rate_scale, p_frames, ret)) {
return ret;
}
WARN_PRINT_ONCE("AudioStreamPlayback::mix unimplemented!");
return 0;
}
void AudioStreamPlayback::_bind_methods() {
@ -103,12 +105,14 @@ void AudioStreamPlaybackResampled::_begin_resample() {
mix_offset = 0;
}
void AudioStreamPlaybackResampled::mix(AudioFrame *p_buffer, float p_rate_scale, int p_frames) {
int AudioStreamPlaybackResampled::mix(AudioFrame *p_buffer, float p_rate_scale, int p_frames) {
float target_rate = AudioServer::get_singleton()->get_mix_rate();
float playback_speed_scale = AudioServer::get_singleton()->get_playback_speed_scale();
uint64_t mix_increment = uint64_t(((get_stream_sampling_rate() * p_rate_scale * playback_speed_scale) / double(target_rate)) * double(FP_LEN));
int mixed_frames_total = p_frames;
for (int i = 0; i < p_frames; i++) {
uint32_t idx = CUBIC_INTERP_HISTORY + uint32_t(mix_offset >> FP_BITS);
//standard cubic interpolation (great quality/performance ratio)
@ -119,6 +123,11 @@ void AudioStreamPlaybackResampled::mix(AudioFrame *p_buffer, float p_rate_scale,
AudioFrame y2 = internal_buffer[idx - 1];
AudioFrame y3 = internal_buffer[idx - 0];
if (idx <= internal_buffer_end && idx >= internal_buffer_end && mixed_frames_total == p_frames) {
// The internal buffer ends somewhere in this range, and we haven't yet recorded the number of good frames we have.
mixed_frames_total = i;
}
float mu2 = mu * mu;
AudioFrame a0 = 3 * y1 - 3 * y2 + y3 - y0;
AudioFrame a1 = 2 * y0 - 5 * y1 + 4 * y2 - y3;
@ -135,7 +144,14 @@ void AudioStreamPlaybackResampled::mix(AudioFrame *p_buffer, float p_rate_scale,
internal_buffer[2] = internal_buffer[INTERNAL_BUFFER_LEN + 2];
internal_buffer[3] = internal_buffer[INTERNAL_BUFFER_LEN + 3];
if (is_playing()) {
_mix_internal(internal_buffer + 4, INTERNAL_BUFFER_LEN);
int mixed_frames = _mix_internal(internal_buffer + 4, INTERNAL_BUFFER_LEN);
if (mixed_frames != INTERNAL_BUFFER_LEN) {
// internal_buffer[mixed_frames] is the first frame of silence.
internal_buffer_end = mixed_frames;
} else {
// The internal buffer does not contain the first frame of silence.
internal_buffer_end = -1;
}
} else {
//fill with silence, not playing
for (int j = 0; j < INTERNAL_BUFFER_LEN; ++j) {
@ -145,6 +161,7 @@ void AudioStreamPlaybackResampled::mix(AudioFrame *p_buffer, float p_rate_scale,
mix_offset -= (INTERNAL_BUFFER_LEN << FP_BITS);
}
}
return mixed_frames_total;
}
////////////////////////////////
@ -210,7 +227,7 @@ void AudioStreamMicrophone::_bind_methods() {
AudioStreamMicrophone::AudioStreamMicrophone() {
}
void AudioStreamPlaybackMicrophone::_mix_internal(AudioFrame *p_buffer, int p_frames) {
int AudioStreamPlaybackMicrophone::_mix_internal(AudioFrame *p_buffer, int p_frames) {
AudioDriver::get_singleton()->lock();
Vector<int32_t> buf = AudioDriver::get_singleton()->get_input_buffer();
@ -221,6 +238,8 @@ void AudioStreamPlaybackMicrophone::_mix_internal(AudioFrame *p_buffer, int p_fr
unsigned int input_position = AudioDriver::get_singleton()->get_input_position();
#endif
int mixed_frames = p_frames;
if (playback_delay > input_size) {
for (int i = 0; i < p_frames; i++) {
p_buffer[i] = AudioFrame(0.0f, 0.0f);
@ -240,6 +259,9 @@ void AudioStreamPlaybackMicrophone::_mix_internal(AudioFrame *p_buffer, int p_fr
p_buffer[i] = AudioFrame(l, r);
} else {
if (mixed_frames == p_frames) {
mixed_frames = i;
}
p_buffer[i] = AudioFrame(0.0f, 0.0f);
}
}
@ -252,10 +274,12 @@ void AudioStreamPlaybackMicrophone::_mix_internal(AudioFrame *p_buffer, int p_fr
#endif
AudioDriver::get_singleton()->unlock();
return mixed_frames;
}
void AudioStreamPlaybackMicrophone::mix(AudioFrame *p_buffer, float p_rate_scale, int p_frames) {
AudioStreamPlaybackResampled::mix(p_buffer, p_rate_scale, p_frames);
int AudioStreamPlaybackMicrophone::mix(AudioFrame *p_buffer, float p_rate_scale, int p_frames) {
return AudioStreamPlaybackResampled::mix(p_buffer, p_rate_scale, p_frames);
}
float AudioStreamPlaybackMicrophone::get_stream_sampling_rate() {
@ -428,13 +452,14 @@ void AudioStreamPlaybackRandomPitch::seek(float p_time) {
}
}
void AudioStreamPlaybackRandomPitch::mix(AudioFrame *p_buffer, float p_rate_scale, int p_frames) {
int AudioStreamPlaybackRandomPitch::mix(AudioFrame *p_buffer, float p_rate_scale, int p_frames) {
if (playing.is_valid()) {
playing->mix(p_buffer, p_rate_scale * pitch_scale, p_frames);
return playing->mix(p_buffer, p_rate_scale * pitch_scale, p_frames);
} else {
for (int i = 0; i < p_frames; i++) {
p_buffer[i] = AudioFrame(0, 0);
}
return p_frames;
}
}

View file

@ -51,7 +51,7 @@ protected:
GDVIRTUAL0RC(int, _get_loop_count)
GDVIRTUAL0RC(float, _get_playback_position)
GDVIRTUAL1(_seek, float)
GDVIRTUAL3(_mix, GDNativePtr<AudioFrame>, float, int)
GDVIRTUAL3R(int, _mix, GDNativePtr<AudioFrame>, float, int)
public:
virtual void start(float p_from_pos = 0.0);
virtual void stop();
@ -62,7 +62,7 @@ public:
virtual float get_playback_position() const;
virtual void seek(float p_time);
virtual void mix(AudioFrame *p_buffer, float p_rate_scale, int p_frames);
virtual int mix(AudioFrame *p_buffer, float p_rate_scale, int p_frames);
};
class AudioStreamPlaybackResampled : public AudioStreamPlayback {
@ -77,15 +77,17 @@ class AudioStreamPlaybackResampled : public AudioStreamPlayback {
};
AudioFrame internal_buffer[INTERNAL_BUFFER_LEN + CUBIC_INTERP_HISTORY];
unsigned int internal_buffer_end = -1;
uint64_t mix_offset;
protected:
void _begin_resample();
virtual void _mix_internal(AudioFrame *p_buffer, int p_frames) = 0;
// Returns the number of frames that were mixed.
virtual int _mix_internal(AudioFrame *p_buffer, int p_frames) = 0;
virtual float get_stream_sampling_rate() = 0;
public:
virtual void mix(AudioFrame *p_buffer, float p_rate_scale, int p_frames) override;
virtual int mix(AudioFrame *p_buffer, float p_rate_scale, int p_frames) override;
AudioStreamPlaybackResampled() { mix_offset = 0; }
};
@ -140,11 +142,11 @@ class AudioStreamPlaybackMicrophone : public AudioStreamPlaybackResampled {
Ref<AudioStreamMicrophone> microphone;
protected:
virtual void _mix_internal(AudioFrame *p_buffer, int p_frames) override;
virtual int _mix_internal(AudioFrame *p_buffer, int p_frames) override;
virtual float get_stream_sampling_rate() override;
public:
virtual void mix(AudioFrame *p_buffer, float p_rate_scale, int p_frames) override;
virtual int mix(AudioFrame *p_buffer, float p_rate_scale, int p_frames) override;
virtual void start(float p_from_pos = 0.0) override;
virtual void stop() override;
@ -208,7 +210,7 @@ public:
virtual float get_playback_position() const override;
virtual void seek(float p_time) override;
virtual void mix(AudioFrame *p_buffer, float p_rate_scale, int p_frames) override;
virtual int mix(AudioFrame *p_buffer, float p_rate_scale, int p_frames) override;
~AudioStreamPlaybackRandomPitch();
};

View file

@ -138,7 +138,7 @@ void AudioStreamGeneratorPlayback::clear_buffer() {
mixed = 0;
}
void AudioStreamGeneratorPlayback::_mix_internal(AudioFrame *p_buffer, int p_frames) {
int AudioStreamGeneratorPlayback::_mix_internal(AudioFrame *p_buffer, int p_frames) {
int read_amount = buffer.data_left();
if (p_frames < read_amount) {
read_amount = p_frames;
@ -156,6 +156,7 @@ void AudioStreamGeneratorPlayback::_mix_internal(AudioFrame *p_buffer, int p_fra
}
mixed += p_frames / generator->get_mix_rate();
return read_amount < p_frames ? read_amount : p_frames;
}
float AudioStreamGeneratorPlayback::get_stream_sampling_rate() {

View file

@ -67,7 +67,7 @@ class AudioStreamGeneratorPlayback : public AudioStreamPlaybackResampled {
AudioStreamGenerator *generator;
protected:
virtual void _mix_internal(AudioFrame *p_buffer, int p_frames) override;
virtual int _mix_internal(AudioFrame *p_buffer, int p_frames) override;
virtual float get_stream_sampling_rate() override;
static void _bind_methods();

View file

@ -32,13 +32,19 @@
#include "core/config/project_settings.h"
#include "core/debugger/engine_debugger.h"
#include "core/error/error_macros.h"
#include "core/io/file_access.h"
#include "core/io/resource_loader.h"
#include "core/math/audio_frame.h"
#include "core/os/os.h"
#include "core/string/string_name.h"
#include "core/templates/pair.h"
#include "scene/resources/audio_stream_sample.h"
#include "servers/audio/audio_driver_dummy.h"
#include "servers/audio/effects/audio_effect_compressor.h"
#include <cstring>
#ifdef TOOLS_ENABLED
#define MARK_EDITED set_edited(true);
#else
@ -234,6 +240,7 @@ AudioDriver *AudioDriverManager::get_driver(int p_driver) {
//////////////////////////////////////////////
void AudioServer::_driver_process(int p_frames, int32_t *p_buffer) {
mix_count++;
int todo = p_frames;
#ifdef DEBUG_ENABLED
@ -331,10 +338,156 @@ void AudioServer::_mix_step() {
bus->soloed = false;
}
}
for (CallbackItem *ci : mix_callback_list) {
ci->callback(ci->userdata);
}
//make callbacks for mixing the audio
for (Set<CallbackItem>::Element *E = callbacks.front(); E; E = E->next()) {
E->get().callback(E->get().userdata);
for (AudioStreamPlaybackListNode *playback : playback_list) {
// Paused streams are no-ops. Don't even mix audio from the stream playback.
if (playback->state.load() == AudioStreamPlaybackListNode::PAUSED) {
continue;
}
bool fading_out = playback->state.load() == AudioStreamPlaybackListNode::FADE_OUT_TO_DELETION || playback->state.load() == AudioStreamPlaybackListNode::FADE_OUT_TO_PAUSE;
AudioFrame *buf = mix_buffer.ptrw();
// Copy the lookahead buffer into the mix buffer.
for (int i = 0; i < LOOKAHEAD_BUFFER_SIZE; i++) {
buf[i] = playback->lookahead[i];
}
// Mix the audio stream
unsigned int mixed_frames = playback->stream_playback->mix(&buf[LOOKAHEAD_BUFFER_SIZE], playback->pitch_scale.get(), buffer_size);
if (mixed_frames != buffer_size) {
// We know we have at least the size of our lookahead buffer for fade-out purposes.
float fadeout_base = 0.87;
float fadeout_coefficient = 1;
static_assert(LOOKAHEAD_BUFFER_SIZE == 32, "Update fadeout_base and comment here if you change LOOKAHEAD_BUFFER_SIZE.");
// 0.87 ^ 32 = 0.0116. There might still be a pop but it'll be way better than if we didn't do this.
for (unsigned int idx = mixed_frames; idx < buffer_size; idx++) {
fadeout_coefficient *= fadeout_base;
buf[idx] *= fadeout_coefficient;
}
AudioStreamPlaybackListNode::PlaybackState new_state;
new_state = AudioStreamPlaybackListNode::AWAITING_DELETION;
playback->state.store(new_state);
} else {
// Move the last little bit of what we just mixed into our lookahead buffer.
for (int i = 0; i < LOOKAHEAD_BUFFER_SIZE; i++) {
playback->lookahead[i] = buf[buffer_size + i];
}
}
ERR_FAIL_COND(playback->bus_details.load() == nullptr);
// By putting null into the bus details pointers, we're taking ownership of their memory for the duration of this mix.
AudioStreamPlaybackBusDetails *bus_details = nullptr;
{
std::atomic<AudioStreamPlaybackBusDetails *> bus_details_atomic = nullptr;
bus_details = playback->bus_details.exchange(bus_details_atomic);
}
ERR_FAIL_COND(bus_details == nullptr);
AudioStreamPlaybackBusDetails *prev_bus_details = playback->prev_bus_details;
// Mix to any active buses.
for (int idx = 0; idx < MAX_BUSES_PER_PLAYBACK; idx++) {
if (!bus_details->bus_active[idx]) {
continue;
}
int bus_idx = thread_find_bus_index(bus_details->bus[idx]);
int prev_bus_idx = -1;
for (int search_idx = 0; search_idx < MAX_BUSES_PER_PLAYBACK; search_idx++) {
if (!prev_bus_details->bus_active[search_idx]) {
continue;
}
if (prev_bus_details->bus[search_idx].hash() == bus_details->bus[idx].hash()) {
prev_bus_idx = search_idx;
}
}
for (int channel_idx = 0; channel_idx < channel_count; channel_idx++) {
AudioFrame *channel_buf = thread_get_channel_mix_buffer(bus_idx, channel_idx);
if (fading_out) {
bus_details->volume[idx][channel_idx] = AudioFrame(0, 0);
}
AudioFrame channel_vol = bus_details->volume[idx][channel_idx];
AudioFrame prev_channel_vol = AudioFrame(0, 0);
if (prev_bus_idx != -1) {
prev_channel_vol = prev_bus_details->volume[prev_bus_idx][channel_idx];
}
_mix_step_for_channel(channel_buf, buf, prev_channel_vol, channel_vol, playback->attenuation_filter_cutoff_hz.get(), playback->highshelf_gain.get(), &playback->filter_process[channel_idx * 2], &playback->filter_process[channel_idx * 2 + 1]);
}
}
// Now go through and fade-out any buses that were being played to previously that we missed by going through current data.
for (int idx = 0; idx < MAX_BUSES_PER_PLAYBACK; idx++) {
if (!prev_bus_details->bus_active[idx]) {
continue;
}
int bus_idx = thread_find_bus_index(prev_bus_details->bus[idx]);
int current_bus_idx = -1;
for (int search_idx = 0; search_idx < MAX_BUSES_PER_PLAYBACK; search_idx++) {
if (bus_details->bus[search_idx] == prev_bus_details->bus[idx]) {
current_bus_idx = search_idx;
}
}
if (current_bus_idx != -1) {
// If we found a corresponding bus in the current bus assignments, we've already mixed to this bus.
continue;
}
for (int channel_idx = 0; channel_idx < channel_count; channel_idx++) {
AudioFrame *channel_buf = thread_get_channel_mix_buffer(bus_idx, channel_idx);
AudioFrame prev_channel_vol = prev_bus_details->volume[idx][channel_idx];
// Fade out to silence
_mix_step_for_channel(channel_buf, buf, prev_channel_vol, AudioFrame(0, 0), playback->attenuation_filter_cutoff_hz.get(), playback->highshelf_gain.get(), &playback->filter_process[channel_idx * 2], &playback->filter_process[channel_idx * 2 + 1]);
}
}
// Copy the bus details we mixed with to the previous bus details to maintain volume ramps.
std::copy(std::begin(bus_details->bus_active), std::end(bus_details->bus_active), std::begin(prev_bus_details->bus_active));
std::copy(std::begin(bus_details->bus), std::end(bus_details->bus), std::begin(prev_bus_details->bus));
for (int bus_idx = 0; bus_idx < MAX_BUSES_PER_PLAYBACK; bus_idx++) {
std::copy(std::begin(bus_details->volume[bus_idx]), std::end(bus_details->volume[bus_idx]), std::begin(prev_bus_details->volume[bus_idx]));
}
AudioStreamPlaybackBusDetails *bus_details_expected = nullptr;
// Only put the bus details pointer back if it hasn't been updated already.
if (!playback->bus_details.compare_exchange_strong(/* expected= */ bus_details_expected, /* new= */ bus_details)) {
// If it *has* been updated already, queue the old one for deletion.
bus_details_graveyard.insert(bus_details);
}
switch (playback->state.load()) {
case AudioStreamPlaybackListNode::AWAITING_DELETION:
case AudioStreamPlaybackListNode::FADE_OUT_TO_DELETION:
playback_list.erase(playback, [](AudioStreamPlaybackListNode *p) {
if (p->prev_bus_details)
delete p->prev_bus_details;
if (p->bus_details)
delete p->bus_details;
p->stream_playback.unref();
delete p;
});
break;
case AudioStreamPlaybackListNode::FADE_OUT_TO_PAUSE: {
// Pause the stream.
AudioStreamPlaybackListNode::PlaybackState old_state, new_state;
do {
old_state = playback->state.load();
new_state = AudioStreamPlaybackListNode::PAUSED;
} while (!playback->state.compare_exchange_strong(/* expected= */ old_state, new_state));
} break;
case AudioStreamPlaybackListNode::PLAYING:
case AudioStreamPlaybackListNode::PAUSED:
// No-op!
break;
}
}
for (int i = buses.size() - 1; i >= 0; i--) {
@ -464,6 +617,53 @@ void AudioServer::_mix_step() {
to_mix = buffer_size;
}
// Mixes one source buffer into one bus channel buffer for a single mix window.
// The volume is interpolated linearly from p_vol_start to p_vol_final across
// the window to avoid zipper noise on volume changes; when p_highshelf_gain is
// nonzero, a single-stage high-shelf filter (used for distance attenuation) is
// applied through the caller-owned per-channel processors.
void AudioServer::_mix_step_for_channel(AudioFrame *p_out_buf, AudioFrame *p_source_buf, AudioFrame p_vol_start, AudioFrame p_vol_final, float p_attenuation_filter_cutoff_hz, float p_highshelf_gain, AudioFilterSW::Processor *p_processor_l, AudioFilterSW::Processor *p_processor_r) {
	if (p_highshelf_gain != 0) {
		// Configure a throwaway filter description; the persistent state lives
		// in the processors, which keep history across mix steps.
		AudioFilterSW filter;
		filter.set_mode(AudioFilterSW::HIGHSHELF);
		filter.set_sampling_rate(AudioServer::get_singleton()->get_mix_rate());
		filter.set_cutoff(p_attenuation_filter_cutoff_hz);
		filter.set_resonance(1);
		filter.set_stages(1);
		filter.set_gain(p_highshelf_gain);

		ERR_FAIL_COND(p_processor_l == nullptr);
		ERR_FAIL_COND(p_processor_r == nullptr);

		// A start volume of exact silence means this playback just became
		// audible on this channel, so stale filter history is discarded.
		bool is_just_started = p_vol_start.l == 0 && p_vol_start.r == 0;
		p_processor_l->set_filter(&filter, /* clear_history= */ is_just_started);
		p_processor_l->update_coeffs(buffer_size);
		p_processor_r->set_filter(&filter, /* clear_history= */ is_just_started);
		p_processor_r->update_coeffs(buffer_size);

		for (unsigned int frame_idx = 0; frame_idx < buffer_size; frame_idx++) {
			// Make this buffer size invariant if buffer_size ever becomes a project setting.
			float lerp_param = (float)frame_idx / buffer_size;
			AudioFrame vol = p_vol_final * lerp_param + (1 - lerp_param) * p_vol_start;
			AudioFrame mixed = vol * p_source_buf[frame_idx];
			// Filter in place, then accumulate into the bus channel buffer.
			p_processor_l->process_one_interp(mixed.l);
			p_processor_r->process_one_interp(mixed.r);
			p_out_buf[frame_idx] += mixed;
		}
	} else {
		// Fast path: no filtering, just the volume ramp and accumulate.
		for (unsigned int frame_idx = 0; frame_idx < buffer_size; frame_idx++) {
			// Make this buffer size invariant if buffer_size ever becomes a project setting.
			float lerp_param = (float)frame_idx / buffer_size;
			p_out_buf[frame_idx] += (p_vol_final * lerp_param + (1 - lerp_param) * p_vol_start) * p_source_buf[frame_idx];
		}
	}
}
// Linearly scans the playback list for the node wrapping p_playback.
// Returns nullptr when the playback is not registered (never started, or
// already removed). The list appears to be safe to iterate concurrently with
// the mix thread (see core/templates/safe_list.h) — confirm before relying on
// stronger guarantees.
AudioServer::AudioStreamPlaybackListNode *AudioServer::_find_playback_list_node(Ref<AudioStreamPlayback> p_playback) {
	for (AudioStreamPlaybackListNode *playback_list_node : playback_list) {
		if (playback_list_node->stream_playback == p_playback) {
			return playback_list_node;
		}
	}
	return nullptr;
}
bool AudioServer::thread_has_channel_mix_buffer(int p_bus, int p_buffer) const {
if (p_bus < 0 || p_bus >= buses.size()) {
return false;
@ -923,9 +1123,216 @@ float AudioServer::get_playback_speed_scale() const {
return playback_speed_scale;
}
// Convenience overload: start p_playback on a single bus with the given
// per-channel volumes, delegating to the multi-bus overload.
void AudioServer::start_playback_stream(Ref<AudioStreamPlayback> p_playback, StringName p_bus, Vector<AudioFrame> p_volume_db_vector, float p_start_time) {
	ERR_FAIL_COND(p_playback.is_null());

	Map<StringName, Vector<AudioFrame>> bus_volumes;
	bus_volumes[p_bus] = p_volume_db_vector;
	start_playback_stream(p_playback, bus_volumes, p_start_time);
}
// Registers p_playback with the mixer and starts it p_start_time seconds in.
// p_bus_volumes maps each target bus to MAX_CHANNELS_PER_BUS per-channel-pair
// linear volumes. The mix thread picks the new node up on its next mix step.
void AudioServer::start_playback_stream(Ref<AudioStreamPlayback> p_playback, Map<StringName, Vector<AudioFrame>> p_bus_volumes, float p_start_time) {
	ERR_FAIL_COND(p_playback.is_null());
	// Validate everything up front so the ERR_FAIL paths below cannot leak the
	// node/bus-details allocations, and so `idx` cannot run past the
	// fixed-size per-playback bus arrays.
	ERR_FAIL_COND(p_bus_volumes.size() > MAX_BUSES_PER_PLAYBACK);
	for (KeyValue<StringName, Vector<AudioFrame>> pair : p_bus_volumes) {
		ERR_FAIL_COND(pair.value.size() < channel_count);
		ERR_FAIL_COND(pair.value.size() != MAX_CHANNELS_PER_BUS);
	}

	AudioStreamPlaybackListNode *playback_node = new AudioStreamPlaybackListNode();
	playback_node->stream_playback = p_playback;
	playback_node->stream_playback->start(p_start_time);

	AudioStreamPlaybackBusDetails *new_bus_details = new AudioStreamPlaybackBusDetails();
	int idx = 0;
	for (KeyValue<StringName, Vector<AudioFrame>> pair : p_bus_volumes) {
		new_bus_details->bus_active[idx] = true;
		new_bus_details->bus[idx] = pair.key;
		for (int channel_idx = 0; channel_idx < MAX_CHANNELS_PER_BUS; channel_idx++) {
			new_bus_details->volume[idx][channel_idx] = pair.value[channel_idx];
		}
		idx++; // BUG FIX: `idx` was never incremented, so every bus overwrote slot 0.
	}
	playback_node->bus_details = new_bus_details;
	playback_node->prev_bus_details = new AudioStreamPlaybackBusDetails();

	playback_node->setseek.set(-1);
	playback_node->pitch_scale.set(1);
	playback_node->highshelf_gain.set(0);
	playback_node->attenuation_filter_cutoff_hz.set(0);

	// Previous volumes start at zero so the first mix ramps in from silence.
	memset(playback_node->prev_bus_details->volume, 0, sizeof(playback_node->prev_bus_details->volume));

	for (AudioFrame &frame : playback_node->lookahead) {
		frame = AudioFrame(0, 0);
	}

	playback_node->state.store(AudioStreamPlaybackListNode::PLAYING);

	// Publishing the node makes it visible to the mix thread.
	playback_list.insert(playback_node);
}
// Requests removal of an active playback. The stream is not deleted here:
// its state is flipped to FADE_OUT_TO_DELETION and the mix thread fades it
// out before erasing the node from the playback list. No-op when the
// playback was never started or is already gone.
void AudioServer::stop_playback_stream(Ref<AudioStreamPlayback> p_playback) {
	ERR_FAIL_COND(p_playback.is_null());

	AudioStreamPlaybackListNode *playback_node = _find_playback_list_node(p_playback);
	if (!playback_node) {
		return;
	}

	// CAS loop: retry until our transition wins against concurrent state
	// changes made by the mix thread.
	AudioStreamPlaybackListNode::PlaybackState new_state, old_state;
	do {
		old_state = playback_node->state.load();
		new_state = AudioStreamPlaybackListNode::FADE_OUT_TO_DELETION;
	} while (!playback_node->state.compare_exchange_strong(old_state, new_state));
}
// Routes p_playback to exactly one bus (replacing any other bus assignments)
// with the given per-channel linear volumes.
void AudioServer::set_playback_bus_exclusive(Ref<AudioStreamPlayback> p_playback, StringName p_bus, Vector<AudioFrame> p_volumes) {
	ERR_FAIL_COND(p_volumes.size() != MAX_CHANNELS_PER_BUS);

	Map<StringName, Vector<AudioFrame>> bus_map;
	bus_map[p_bus] = p_volumes;
	set_playback_bus_volumes_linear(p_playback, bus_map);
}
// Atomically replaces the bus/volume assignments of an active playback. The
// old details are swapped out with a CAS and parked in the graveyard; they
// are freed later (in update()) once the mix thread cannot be touching them.
void AudioServer::set_playback_bus_volumes_linear(Ref<AudioStreamPlayback> p_playback, Map<StringName, Vector<AudioFrame>> p_bus_volumes) {
	ERR_FAIL_COND(p_bus_volumes.size() > MAX_BUSES_PER_PLAYBACK);

	AudioStreamPlaybackListNode *playback_node = _find_playback_list_node(p_playback);
	if (!playback_node) {
		return;
	}
	// Validate before allocating so the ERR_FAIL paths cannot leak the new
	// bus details.
	for (KeyValue<StringName, Vector<AudioFrame>> pair : p_bus_volumes) {
		ERR_FAIL_COND(pair.value.size() < channel_count);
		ERR_FAIL_COND(pair.value.size() != MAX_CHANNELS_PER_BUS);
	}

	AudioStreamPlaybackBusDetails *old_bus_details, *new_bus_details = new AudioStreamPlaybackBusDetails();

	int idx = 0;
	for (KeyValue<StringName, Vector<AudioFrame>> pair : p_bus_volumes) {
		new_bus_details->bus_active[idx] = true;
		new_bus_details->bus[idx] = pair.key;
		for (int channel_idx = 0; channel_idx < MAX_CHANNELS_PER_BUS; channel_idx++) {
			new_bus_details->volume[idx][channel_idx] = pair.value[channel_idx];
		}
		idx++; // BUG FIX: `idx` was never incremented, so every bus overwrote slot 0.
	}

	// Swap the new details in; retry if the mix thread is concurrently
	// exchanging the pointer.
	do {
		old_bus_details = playback_node->bus_details.load();
	} while (!playback_node->bus_details.compare_exchange_strong(old_bus_details, new_bus_details));

	// Defer deletion of the old details until it is provably unused.
	bus_details_graveyard.insert(old_bus_details);
}
// Applies the same per-channel volumes to every bus the playback is
// currently assigned to.
void AudioServer::set_playback_all_bus_volumes_linear(Ref<AudioStreamPlayback> p_playback, Vector<AudioFrame> p_volumes) {
	ERR_FAIL_COND(p_playback.is_null());
	ERR_FAIL_COND(p_volumes.size() != MAX_CHANNELS_PER_BUS);

	Map<StringName, Vector<AudioFrame>> map;

	AudioStreamPlaybackListNode *playback_node = _find_playback_list_node(p_playback);
	if (!playback_node) {
		return;
	}
	// BUG FIX: snapshot the atomic pointer once. The previous code called
	// bus_details.load() twice per loop iteration and never null-checked it,
	// but the mix thread takes ownership of the details during a mix step by
	// exchanging nullptr in, so repeated loads could observe different
	// snapshots — or dereference null.
	AudioStreamPlaybackBusDetails *bus_details = playback_node->bus_details.load();
	ERR_FAIL_COND(bus_details == nullptr);
	for (int bus_idx = 0; bus_idx < MAX_BUSES_PER_PLAYBACK; bus_idx++) {
		if (bus_details->bus_active[bus_idx]) {
			map[bus_details->bus[bus_idx]] = p_volumes;
		}
	}
	set_playback_bus_volumes_linear(p_playback, map);
}
// Updates the pitch scale of an active playback. Silently ignored when the
// playback is not (or no longer) registered with the mixer.
void AudioServer::set_playback_pitch_scale(Ref<AudioStreamPlayback> p_playback, float p_pitch_scale) {
	ERR_FAIL_COND(p_playback.is_null());

	AudioStreamPlaybackListNode *node = _find_playback_list_node(p_playback);
	if (node != nullptr) {
		node->pitch_scale.set(p_pitch_scale);
	}
}
// Pauses or resumes an active playback. Pausing is not immediate: the state
// goes through FADE_OUT_TO_PAUSE so the mix thread can ramp the stream down
// before it stops being mixed. No-op when the playback is unregistered or
// already in the requested state.
void AudioServer::set_playback_paused(Ref<AudioStreamPlayback> p_playback, bool p_paused) {
	ERR_FAIL_COND(p_playback.is_null());

	AudioStreamPlaybackListNode *playback_node = _find_playback_list_node(p_playback);
	if (!playback_node) {
		return;
	}
	if (!p_paused && playback_node->state == AudioStreamPlaybackListNode::PLAYING) {
		return; // No-op.
	}
	if (p_paused && (playback_node->state == AudioStreamPlaybackListNode::PAUSED || playback_node->state == AudioStreamPlaybackListNode::FADE_OUT_TO_PAUSE)) {
		return; // No-op.
	}

	// CAS loop: retry until our transition wins against concurrent state
	// changes made by the mix thread.
	AudioStreamPlaybackListNode::PlaybackState new_state, old_state;
	do {
		old_state = playback_node->state.load();
		new_state = p_paused ? AudioStreamPlaybackListNode::FADE_OUT_TO_PAUSE : AudioStreamPlaybackListNode::PLAYING;
	} while (!playback_node->state.compare_exchange_strong(old_state, new_state));
}
// Configures the per-playback high-shelf (distance attenuation) filter used
// in _mix_step_for_channel. A gain of 0 disables the filter in the mix step.
void AudioServer::set_playback_highshelf_params(Ref<AudioStreamPlayback> p_playback, float p_gain, float p_attenuation_cutoff_hz) {
	ERR_FAIL_COND(p_playback.is_null());

	AudioStreamPlaybackListNode *node = _find_playback_list_node(p_playback);
	if (node == nullptr) {
		return;
	}
	// Cutoff is published before gain, matching the order the mix thread may
	// observe these values in.
	node->attenuation_filter_cutoff_hz.set(p_attenuation_cutoff_hz);
	node->highshelf_gain.set(p_gain);
}
// Returns true only while the playback is actively being mixed — i.e. its
// state is PLAYING (not paused, not fading out toward pause or deletion).
bool AudioServer::is_playback_active(Ref<AudioStreamPlayback> p_playback) {
	ERR_FAIL_COND_V(p_playback.is_null(), false);

	AudioStreamPlaybackListNode *node = _find_playback_list_node(p_playback);
	if (node == nullptr) {
		return false;
	}
	return node->state.load() == AudioStreamPlaybackListNode::PLAYING;
}
// Returns the playback position in seconds, as reported by the stream
// playback itself; 0 when the playback is not registered with the mixer.
float AudioServer::get_playback_position(Ref<AudioStreamPlayback> p_playback) {
	ERR_FAIL_COND_V(p_playback.is_null(), 0);

	AudioStreamPlaybackListNode *node = _find_playback_list_node(p_playback);
	if (node == nullptr) {
		return 0;
	}
	return node->stream_playback->get_playback_position();
}
// Returns true when the playback is paused or currently fading out toward a
// pause.
bool AudioServer::is_playback_paused(Ref<AudioStreamPlayback> p_playback) {
	ERR_FAIL_COND_V(p_playback.is_null(), false);

	AudioStreamPlaybackListNode *playback_node = _find_playback_list_node(p_playback);
	if (!playback_node) {
		return false;
	}
	// BUG FIX: read the atomic state exactly once. The previous code loaded
	// it twice, so a concurrent FADE_OUT_TO_PAUSE -> PAUSED transition by the
	// mix thread between the two loads could make both comparisons fail and
	// misreport an effectively-paused stream as unpaused.
	AudioStreamPlaybackListNode::PlaybackState state = playback_node->state.load();
	return state == AudioStreamPlaybackListNode::PAUSED || state == AudioStreamPlaybackListNode::FADE_OUT_TO_PAUSE;
}
// Returns the number of mix steps performed since startup (monotonically
// increasing; incremented by the audio thread in _driver_process).
uint64_t AudioServer::get_mix_count() const {
	return mix_count;
}
// Runs every registered listener-changed callback (see
// add_listener_changed_callback). Invoked e.g. by Viewport when a 2D/3D
// audio listener changes.
void AudioServer::notify_listener_changed() {
	for (CallbackItem *ci : listener_changed_callback_list) {
		ci->callback(ci->userdata);
	}
}
void AudioServer::init_channels_and_buffers() {
channel_count = get_channel_count();
temp_buffer.resize(channel_count);
mix_buffer.resize(buffer_size + LOOKAHEAD_BUFFER_SIZE);
for (int i = 0; i < temp_buffer.size(); i++) {
temp_buffer.write[i].resize(buffer_size);
@ -943,7 +1350,7 @@ void AudioServer::init() {
channel_disable_threshold_db = GLOBAL_DEF_RST("audio/buses/channel_disable_threshold_db", -60.0);
channel_disable_frames = float(GLOBAL_DEF_RST("audio/buses/channel_disable_time", 2.0)) * get_mix_rate();
ProjectSettings::get_singleton()->set_custom_property_info("audio/buses/channel_disable_time", PropertyInfo(Variant::FLOAT, "audio/buses/channel_disable_time", PROPERTY_HINT_RANGE, "0,5,0.01,or_greater"));
buffer_size = 1024; //hardcoded for now
buffer_size = 512; //hardcoded for now
init_channels_and_buffers();
@ -1030,9 +1437,17 @@ void AudioServer::update() {
prof_time = 0;
#endif
for (Set<CallbackItem>::Element *E = update_callbacks.front(); E; E = E->next()) {
E->get().callback(E->get().userdata);
for (CallbackItem *ci : update_callback_list) {
ci->callback(ci->userdata);
}
mix_callback_list.maybe_cleanup();
update_callback_list.maybe_cleanup();
listener_changed_callback_list.maybe_cleanup();
playback_list.maybe_cleanup();
for (AudioStreamPlaybackBusDetails *bus_details : bus_details_graveyard) {
bus_details_graveyard.erase(bus_details, [](AudioStreamPlaybackBusDetails *d) { delete d; });
}
bus_details_graveyard.maybe_cleanup();
}
void AudioServer::load_default_bus_layout() {
@ -1098,40 +1513,49 @@ double AudioServer::get_time_since_last_mix() const {
AudioServer *AudioServer::singleton = nullptr;
// Registers a callback in the value-typed `callbacks` set, guarded by the
// global audio lock (lock()/unlock()).
void AudioServer::add_callback(AudioCallback p_callback, void *p_userdata) {
	lock();
	CallbackItem ci;
	ci.callback = p_callback;
	ci.userdata = p_userdata;
	callbacks.insert(ci);
	unlock();
}
// Removes a previously registered callback/userdata pair from the
// `callbacks` set, under the AudioServer lock.
void AudioServer::remove_callback(AudioCallback p_callback, void *p_userdata) {
	lock();
	CallbackItem item{ p_callback, p_userdata };
	callbacks.erase(item);
	unlock();
}
// Registers a callback to be invoked from update().
// The heap-allocated CallbackItem is owned by update_callback_list and is
// deleted when it is erased in remove_update_callback().
//
// Fix: the block as rendered fused the old Set-based implementation (with
// lock()/unlock() and a by-value `ci`) together with the new SafeList one,
// redeclaring `ci` and leaving an unmatched lock() path. Only the lock-free
// SafeList implementation is kept, consistent with add_mix_callback() and
// add_listener_changed_callback().
void AudioServer::add_update_callback(AudioCallback p_callback, void *p_userdata) {
	CallbackItem *ci = new CallbackItem();
	ci->callback = p_callback;
	ci->userdata = p_userdata;
	update_callback_list.insert(ci);
}
// Unregisters an update callback matching both function pointer and userdata.
// SafeList::erase takes a deleter, which frees the CallbackItem once it is
// safe to do so.
//
// Fix: the block as rendered fused the old Set-based implementation
// (lock()/unlock(), by-value `ci`, `update_callbacks.erase`) with the new
// SafeList loop, redeclaring `ci`. Only the SafeList implementation is kept,
// consistent with remove_mix_callback() and
// remove_listener_changed_callback().
void AudioServer::remove_update_callback(AudioCallback p_callback, void *p_userdata) {
	for (CallbackItem *ci : update_callback_list) {
		if (ci->callback == p_callback && ci->userdata == p_userdata) {
			update_callback_list.erase(ci, [](CallbackItem *c) { delete c; });
		}
	}
}
// Registers a mix callback. The list takes ownership of the heap-allocated
// item; it is deleted when erased in remove_mix_callback().
void AudioServer::add_mix_callback(AudioCallback p_callback, void *p_userdata) {
	CallbackItem *item = new CallbackItem();
	item->userdata = p_userdata;
	item->callback = p_callback;
	mix_callback_list.insert(item);
}
// Unregisters a previously added mix callback. Only entries matching both the
// function pointer and the userdata are erased; the deleter passed to erase()
// frees the item.
void AudioServer::remove_mix_callback(AudioCallback p_callback, void *p_userdata) {
	for (CallbackItem *item : mix_callback_list) {
		const bool matches = item->callback == p_callback && item->userdata == p_userdata;
		if (matches) {
			mix_callback_list.erase(item, [](CallbackItem *to_free) { delete to_free; });
		}
	}
}
// Registers a listener-changed callback (fired by notify_listener_changed()).
// The list owns the heap-allocated item until it is erased.
void AudioServer::add_listener_changed_callback(AudioCallback p_callback, void *p_userdata) {
	CallbackItem *item = new CallbackItem();
	item->userdata = p_userdata;
	item->callback = p_callback;
	listener_changed_callback_list.insert(item);
}
// Unregisters a listener-changed callback matching both function pointer and
// userdata; erased items are freed by the deleter passed to erase().
void AudioServer::remove_listener_changed_callback(AudioCallback p_callback, void *p_userdata) {
	for (CallbackItem *item : listener_changed_callback_list) {
		const bool matches = item->callback == p_callback && item->userdata == p_userdata;
		if (matches) {
			listener_changed_callback_list.erase(item, [](CallbackItem *to_free) { delete to_free; });
		}
	}
}
void AudioServer::set_bus_layout(const Ref<AudioBusLayout> &p_bus_layout) {

View file

@ -34,12 +34,17 @@
#include "core/math/audio_frame.h"
#include "core/object/class_db.h"
#include "core/os/os.h"
#include "core/templates/safe_list.h"
#include "core/variant/variant.h"
#include "servers/audio/audio_effect.h"
#include "servers/audio/audio_filter_sw.h"
#include <atomic>
class AudioDriverDummy;
class AudioStream;
class AudioStreamSample;
class AudioStreamPlayback;
class AudioDriver {
static AudioDriver *singleton;
@ -155,7 +160,10 @@ public:
};
// Fix: the diff render fused the old enumerator line (`AUDIO_DATA_INVALID_ID = -1`,
// no trailing comma) with the new one, duplicating AUDIO_DATA_INVALID_ID —
// a redefinition error. Only the new four-enumerator form is kept.
enum {
	AUDIO_DATA_INVALID_ID = -1,
	MAX_CHANNELS_PER_BUS = 4,
	MAX_BUSES_PER_PLAYBACK = 6,
	LOOKAHEAD_BUFFER_SIZE = 32,
};
typedef void (*AudioCallback)(void *p_userdata);
@ -219,7 +227,43 @@ private:
int index_cache;
};
// Per-playback routing snapshot: which buses this playback sends to and at
// what per-channel volume. Intended to be swapped out as a whole rather than
// mutated in place (see the atomic bus_details pointer usage nearby).
struct AudioStreamPlaybackBusDetails {
	// Marks which of the slots below are in use.
	bool bus_active[MAX_BUSES_PER_PLAYBACK] = { false, false, false, false, false, false };
	// Target bus name for each slot.
	StringName bus[MAX_BUSES_PER_PLAYBACK];
	// Mix volume for each slot, per channel.
	AudioFrame volume[MAX_BUSES_PER_PLAYBACK][MAX_CHANNELS_PER_BUS];
};
// Bookkeeping for one active stream playback, held in playback_list. The
// atomics and SafeNumerics exist so parameters can be updated while the audio
// thread is mixing.
struct AudioStreamPlaybackListNode {
	enum PlaybackState {
		PAUSED = 0, // Paused. Keep this stream playback around though so it can be restarted.
		PLAYING = 1, // Playing. Fading may still be necessary if volume changes!
		FADE_OUT_TO_PAUSE = 2, // About to pause.
		FADE_OUT_TO_DELETION = 3, // About to stop.
		AWAITING_DELETION = 4,
	};
	// If zero or positive, a place in the stream to seek to during the next mix.
	SafeNumeric<float> setseek;
	SafeNumeric<float> pitch_scale;
	SafeNumeric<float> highshelf_gain;
	SafeNumeric<float> attenuation_filter_cutoff_hz; // This isn't used unless highshelf_gain is nonzero.
	// NOTE(review): presumably one processor per mixed channel pair — confirm against the mix step.
	AudioFilterSW::Processor filter_process[8];
	// Updating this ref after the list node is created breaks consistency guarantees, don't do it!
	Ref<AudioStreamPlayback> stream_playback;
	// Playback state determines the fate of a particular AudioStreamListNode during the mix step. Must be atomically replaced.
	std::atomic<PlaybackState> state = AWAITING_DELETION;
	// This data should only ever be modified by an atomic replacement of the pointer.
	std::atomic<AudioStreamPlaybackBusDetails *> bus_details = nullptr;
	// Previous bus details should only be accessed on the audio thread.
	AudioStreamPlaybackBusDetails *prev_bus_details = nullptr;
	// The next few samples are stored here so we have some time to fade audio out if it ends abruptly at the beginning of the next mix.
	AudioFrame lookahead[LOOKAHEAD_BUFFER_SIZE];
};
SafeList<AudioStreamPlaybackListNode *> playback_list;
SafeList<AudioStreamPlaybackBusDetails *> bus_details_graveyard;
Vector<Vector<AudioFrame>> temp_buffer; //temp_buffer for each level
Vector<AudioFrame> mix_buffer;
Vector<Bus *> buses;
Map<StringName, Bus *> bus_map;
@ -230,18 +274,19 @@ private:
void init_channels_and_buffers();
void _mix_step();
void _mix_step_for_channel(AudioFrame *p_out_buf, AudioFrame *p_source_buf, AudioFrame p_vol_start, AudioFrame p_vol_final, float p_attenuation_filter_cutoff_hz, float p_highshelf_gain, AudioFilterSW::Processor *p_processor_l, AudioFilterSW::Processor *p_processor_r);
// Should only be called on the main thread.
AudioStreamPlaybackListNode *_find_playback_list_node(Ref<AudioStreamPlayback> p_playback);
// A registered callback plus its opaque userdata.
struct CallbackItem {
	AudioCallback callback;
	void *userdata;

	// Strict weak ordering so CallbackItem can serve as an ordered-set key:
	// primary key is the callback pointer, userdata breaks ties.
	bool operator<(const CallbackItem &p_item) const {
		if (callback != p_item.callback) {
			return callback < p_item.callback;
		}
		return userdata < p_item.userdata;
	}
};
Set<CallbackItem> callbacks;
Set<CallbackItem> update_callbacks;
SafeList<CallbackItem *> update_callback_list;
SafeList<CallbackItem *> mix_callback_list;
SafeList<CallbackItem *> listener_changed_callback_list;
friend class AudioDriver;
void _driver_process(int p_frames, int32_t *p_buffer);
@ -319,6 +364,25 @@ public:
void set_playback_speed_scale(float p_scale);
float get_playback_speed_scale() const;
void start_playback_stream(Ref<AudioStreamPlayback> p_playback, StringName p_bus, Vector<AudioFrame> p_volume_db_vector, float p_start_time = 0);
void start_playback_stream(Ref<AudioStreamPlayback> p_playback, Map<StringName, Vector<AudioFrame>> p_bus_volumes, float p_start_time = 0);
void stop_playback_stream(Ref<AudioStreamPlayback> p_playback);
void set_playback_bus_exclusive(Ref<AudioStreamPlayback> p_playback, StringName p_bus, Vector<AudioFrame> p_volumes);
void set_playback_bus_volumes_linear(Ref<AudioStreamPlayback> p_playback, Map<StringName, Vector<AudioFrame>> p_bus_volumes);
void set_playback_all_bus_volumes_linear(Ref<AudioStreamPlayback> p_playback, Vector<AudioFrame> p_volumes);
void set_playback_pitch_scale(Ref<AudioStreamPlayback> p_playback, float p_pitch_scale);
void set_playback_paused(Ref<AudioStreamPlayback> p_playback, bool p_paused);
void set_playback_highshelf_params(Ref<AudioStreamPlayback> p_playback, float p_gain, float p_attenuation_cutoff_hz);
bool is_playback_active(Ref<AudioStreamPlayback> p_playback);
float get_playback_position(Ref<AudioStreamPlayback> p_playback);
bool is_playback_paused(Ref<AudioStreamPlayback> p_playback);
uint64_t get_mix_count() const;
void notify_listener_changed();
virtual void init();
virtual void finish();
virtual void update();
@ -340,12 +404,15 @@ public:
virtual double get_time_to_next_mix() const;
virtual double get_time_since_last_mix() const;
void add_callback(AudioCallback p_callback, void *p_userdata);
void remove_callback(AudioCallback p_callback, void *p_userdata);
void add_listener_changed_callback(AudioCallback p_callback, void *p_userdata);
void remove_listener_changed_callback(AudioCallback p_callback, void *p_userdata);
void add_update_callback(AudioCallback p_callback, void *p_userdata);
void remove_update_callback(AudioCallback p_callback, void *p_userdata);
void add_mix_callback(AudioCallback p_callback, void *p_userdata);
void remove_mix_callback(AudioCallback p_callback, void *p_userdata);
void set_bus_layout(const Ref<AudioBusLayout> &p_bus_layout);
Ref<AudioBusLayout> generate_bus_layout() const;