| author | V3n3RiX <venerix@koprulu.sector> | 2024-07-06 08:06:16 +0100 |
|---|---|---|
| committer | V3n3RiX <venerix@koprulu.sector> | 2024-07-06 08:06:16 +0100 |
| commit | 4187bba080530c5ca1c7dae9c233e88f3fc8f535 (patch) | |
| tree | b6f535e053876097ced1b6bda14a4da890c730d4 /media-video/pipewire | |
| parent | 2a8d2f71d1d9963368e0ef3d641d75979a689d12 (diff) | |
gentoo auto-resync : 06:07:2024 - 08:06:15
Diffstat (limited to 'media-video/pipewire')
| mode | path | lines changed |
|---|---|---|
| -rw-r--r-- | media-video/pipewire/Manifest | 7 |
| -rw-r--r-- | media-video/pipewire/files/1.2.0/0001-impl-node-fix-required-state-for-async-driver-nodes.patch | 82 |
| -rw-r--r-- | media-video/pipewire/files/1.2.0/0002-module-raop-only-set-softVolume-when-valid.patch | 30 |
| -rw-r--r-- | media-video/pipewire/files/1.2.0/0003-context-Fix-node-collect-with-groups-and-sync-enable.patch | 149 |
| -rw-r--r-- | media-video/pipewire/files/1.2.0/0004-impl-node-disable-async-for-driver-nodes.patch | 112 |
| -rw-r--r-- | media-video/pipewire/files/1.2.0/0005-impl-node-set-INACTIVE-state-on-server.patch | 30 |
| -rw-r--r-- | media-video/pipewire/pipewire-1.2.0-r2.ebuild (renamed from media-video/pipewire/pipewire-1.2.0-r1.ebuild) | 0 |
7 files changed, 409 insertions, 1 deletion
diff --git a/media-video/pipewire/Manifest b/media-video/pipewire/Manifest
index c434e7546592..aee6f904eb0e 100644
--- a/media-video/pipewire/Manifest
+++ b/media-video/pipewire/Manifest
@@ -1,3 +1,8 @@
+AUX 1.2.0/0001-impl-node-fix-required-state-for-async-driver-nodes.patch 3030 BLAKE2B 372d33b155cd8b798265b6dd04296179adcfcddeefd7253a9979f4722b343f68a7cbe128f1f4b24acda9c099d6fc12e75d14c1ec184b5f25a5f3667c5fc85383 SHA512 19803d71e99cc6b79e96b1e8d96c7d279fe345d67df8242dd8589cc8a2eb8549ed879cb8c3a00d9c7d780329dfc3821dca13fb19dea366b5f86f733413586b30
+AUX 1.2.0/0002-module-raop-only-set-softVolume-when-valid.patch 1048 BLAKE2B 326704effb2a904079c33d07c6557c854ef6842a9f3ade996d1a8a4f1e5d073246426d7c94f5e44f9bf195877326972368cecd9cad6883a6c7a33973a4447284 SHA512 a125dd8d7ac121c7fb66211bcac9a9202f654ba5550b237a55588b8153ac6523b350038305df930b3eb5ac640c53387c56b7b8cc3d29a124180425355f32416b
+AUX 1.2.0/0003-context-Fix-node-collect-with-groups-and-sync-enable.patch 5206 BLAKE2B 5d4d6ded0a8cf79c7181c40707329d11a00b4d543b46e0e3bf37985299ca0cdd2782a1c9843469f160332d8bd4a987e10165dc25c99405defc2e02119f665d0b SHA512 37b972529cd4f702863a7e19d1980439daab26ee5feed548a204d969c8af9dcaae29a58ea3b87429c0da0b1220ad4e8b719f294103feebc500ed872d5215c565
+AUX 1.2.0/0004-impl-node-disable-async-for-driver-nodes.patch 4717 BLAKE2B 92ddbc78ff24a385bc54a88c46f3654456c8db27229715a4923790cd0e271b32fe245295b37eda6023e2827c3791dc5bf29411edcdad9a7b91c4e593dbe7c34a SHA512 1fb1b85ee717952b8551dc943c72aa158279aacae5666649b8a2b0c1f63ad9d681817be9d5d1b85f4e51aee6b43febda07d01c85e6ba72c1a31323adc4098a02
+AUX 1.2.0/0005-impl-node-set-INACTIVE-state-on-server.patch 1188 BLAKE2B e54b9973f7529b0a6facdc632a2e9c8fd2080171882e5dc081da6ac7e241266eca5186d11d09670281ae772b3054190d64220d2cf0b3e7ca8853d6a9667f8add SHA512 49261c21e7d41d7e1017a8ab5c2f72056a68b10d4d5ac5ab1fe66418cad727fc0ba915dc8f551640ee1b6a54285981dd19fdc999784dbbac7facdee30c7271f3
 AUX 99-pipewire-default-hook.conf 540 BLAKE2B cec76e78db9db290054bf766774c284d7a7a1374c453802eb1bdc19db9289856ec7b8643acbab916a39ca859d543deca111e3b0f110d1904343221c1573ffc17 SHA512 ef891e38c6f40fce2904e240307147a58c48d780e8470dac8e28044cfb86ddeb163885a2362687791779379ad814f58ad5649b75ac4e82931c5bdd3f37a6ed01
 AUX gentoo-pipewire-launcher.1 1633 BLAKE2B 36c8888693bd0389dc9b1e49d0501d2679956d88f6b7ac42f0347c3fe2259775dd7082cb0bbdeb54a232fcc8902faa8086203e0fe2437c0eea5f1927ba2d0de4 SHA512 8085c9a989f21ee7f2528d56d5b104c0d14024df06f5cb6cd2aa8168b58469c71b91c788706b11021d9dbddb647a2203175603f4f8aa7b33a63a7b7cf8b59d1f
 AUX gentoo-pipewire-launcher.in-r3 2848 BLAKE2B 5e6b8ca2df88cef240ca1ff4de28cc8ed3590fabd29b903b37880baf9cf52c35ca01be25b031898d4ee06f4600a5f0c61025a244778577a562f4a4bfa14e2aca SHA512 43cf3e07058e86bf6651106f7f192022c05c5640ba8e101b88e98a83efcc5614f9474107b6d0daf3d5234a51ebcc0a3a7ae5b359d9ba36b648c125813f4e1334
@@ -17,6 +22,6 @@ EBUILD pipewire-1.0.5-r1.ebuild 19283 BLAKE2B c873436b2f3cad35aa2d6797085823cc21
 EBUILD pipewire-1.0.5.ebuild 19988 BLAKE2B 49df8fcf2e136537401ebab8673c0815a275c2deb29a2779306a77536d2db0688c81e69bbf768061a2ee3e94888da5e77e12055594bf84b14951fd28f12e248c SHA512 5d6506afa393f6286ba33a88cdf17f739a265e2f09baf1b7f9ff5fb239a7b4d87b32ce553d366bfaf6ba36f4384982cf73a521488118d2e5f33d87b4cc9467e3
 EBUILD pipewire-1.0.6.ebuild 19283 BLAKE2B c873436b2f3cad35aa2d6797085823cc215d621b275634c25c3b79e00a56121a97824f3092df74b13809a9ea7639b669710fb9ae7476441209c9f7aff1496827 SHA512 f290058849e339a64bb897e831618b9f886b9c6b7754a0f3b4301fc94be929c0531ffd6c98754228c30bc308371e508021fa907252028d6a5a57d14314d4b5b6
 EBUILD pipewire-1.0.7-r1.ebuild 19565 BLAKE2B c0250fdcf5157412d74ef4c020d64b571f84cf75296fe900a4fa964352782f440bc05b413df6475e227c42b526b9e423f23aafa19debc65d18018a43a85452cf SHA512 77f9b34964a14f34a855a8061860b4ce5a6de0ad75f99bc1d599c9c0ad318b6e4604b449e7a0c0483cc4439ce39d4233d2a2cedc1f92ad2b7c3e12ba317841bc
-EBUILD pipewire-1.2.0-r1.ebuild 19711 BLAKE2B 65857f3b19d08a242973dc65fc333556b8ec0d8e91f56e712706ff8a217f3aa5cecdaa7dd43002e8391b61feb96e818c852d68a8cf4d9930906cd7fbf3d228f1 SHA512 bc94f1000e8ad0b7ba91199d240fa8ac15e55f50bab175be6390d040238f8682df5235a5871aa528c5cd36c429a8bc1a7a838660614265696862389ac9cea6c9
+EBUILD pipewire-1.2.0-r2.ebuild 19711 BLAKE2B 65857f3b19d08a242973dc65fc333556b8ec0d8e91f56e712706ff8a217f3aa5cecdaa7dd43002e8391b61feb96e818c852d68a8cf4d9930906cd7fbf3d228f1 SHA512 bc94f1000e8ad0b7ba91199d240fa8ac15e55f50bab175be6390d040238f8682df5235a5871aa528c5cd36c429a8bc1a7a838660614265696862389ac9cea6c9
 EBUILD pipewire-9999.ebuild 19591 BLAKE2B 4c2d6315cad7fb2746817dbc15fe29f3a2007e8abc83a338952ee0f82a1113dbfd8d0426595fd6d5c3c1ea7e4a1d7b87e135b2202e563b95a06ad4403cf684e9 SHA512 f8341246a2c4403375f8493966d42801d1a4846d2a3a6e9464421a50bddf95dca5c3b7832c042e63ef68773550e009235e55d98f3a90178419ee15c73ddad936
 MISC metadata.xml 2156 BLAKE2B 974459115e0f3cf4e4c3ac15159740b42e01a093da37d8a27f80e122c9ab2a3fe9194443eaf08f9b66d613db90a14465abbe1b76816bce90e11a46c8409c7513 SHA512 cfe0fdb86c993f167507e22635878d6d7d5dcd48f4c23323231263551ceff986fc454700428ecb7d2ee2abe82093c35d8e7bd491020fd6dd3f3889c09e9020bb
diff --git a/media-video/pipewire/files/1.2.0/0001-impl-node-fix-required-state-for-async-driver-nodes.patch b/media-video/pipewire/files/1.2.0/0001-impl-node-fix-required-state-for-async-driver-nodes.patch
new file mode 100644
index 000000000000..105c8dd1676a
--- /dev/null
+++ b/media-video/pipewire/files/1.2.0/0001-impl-node-fix-required-state-for-async-driver-nodes.patch
@@ -0,0 +1,82 @@
+From b8d07e40d66f12ac28aab710cfeb181bf25bc59a Mon Sep 17 00:00:00 2001
+From: Wim Taymans <wtaymans@redhat.com>
+Date: Mon, 1 Jul 2024 10:36:09 +0200
+Subject: [PATCH 1/5] impl-node: fix required state for async driver nodes
+
+When the node activation.required was incremented because it was a
+driver, only decrement it in that case, regardless of the current driver
+state of the node.
+
+This fixes the case of KODI where the required field gets out of sync
+and things become unschedulable.
+
+Fixes #4087
+---
+ src/pipewire/impl-node.c | 22 ++++++++++++++--------
+ src/pipewire/private.h | 1 +
+ 2 files changed, 15 insertions(+), 8 deletions(-)
+
+diff --git a/src/pipewire/impl-node.c b/src/pipewire/impl-node.c
+index 12629ee64..4def52897 100644
+--- a/src/pipewire/impl-node.c
++++ b/src/pipewire/impl-node.c
+@@ -112,13 +112,17 @@ static inline void activate_target(struct pw_impl_node *node, struct pw_node_tar
+ {
+ struct pw_node_activation_state *state = &t->activation->state[0];
+ if (!t->active) {
+- if ((!node->async || node->driving) && !node->exported) {
+- SPA_ATOMIC_INC(state->required);
+- SPA_ATOMIC_INC(state->pending);
++ if (!node->async || node->driving) {
++ if (!node->exported) {
++ SPA_ATOMIC_INC(state->required);
++ SPA_ATOMIC_INC(state->pending);
++ }
+ }
++ t->active_driving = node->driving;
+ t->active = true;
+- pw_log_debug("%p: target state:%p id:%d pending:%d/%d",
+- node, state, t->id, state->pending, state->required);
++ pw_log_debug("%p: target state:%p id:%d pending:%d/%d %d:%d:%d",
++ node, state, t->id, state->pending, state->required,
++ node->async, node->driving, node->exported);
+ }
+ }
+
+@@ -126,7 +130,7 @@ static inline void deactivate_target(struct pw_impl_node *node, struct pw_node_t
+ {
+ if (t->active) {
+ struct pw_node_activation_state *state = &t->activation->state[0];
+- if (!node->async || node->driving) {
++ if (!node->async || t->active_driving) {
+ /* the driver copies the required to the pending state
+ * so first try to resume the node and then decrement the
+ * required state. This way we either resume with the old value
+@@ -137,8 +141,10 @@ static inline void deactivate_target(struct pw_impl_node *node, struct pw_node_t
+ SPA_ATOMIC_DEC(state->required);
+ }
+ t->active = false;
+- pw_log_debug("%p: target state:%p id:%d pending:%d/%d trigger:%"PRIu64,
+- node, state, t->id, state->pending, state->required, trigger);
++ t->active_driving = false;
++ pw_log_debug("%p: target state:%p id:%d pending:%d/%d %d:%d:%d trigger:%"PRIu64,
++ node, state, t->id, state->pending, state->required,
++ node->async, node->driving, node->exported, trigger);
+ }
+ }
+
+diff --git a/src/pipewire/private.h b/src/pipewire/private.h
+index 8c01fe8d5..25af677ac 100644
+--- a/src/pipewire/private.h
++++ b/src/pipewire/private.h
+@@ -541,6 +541,7 @@ struct pw_node_target {
+ int fd;
+ void (*trigger)(struct pw_node_target *t, uint64_t nsec);
+ unsigned int active:1;
++ unsigned int active_driving:1;
+ unsigned int added:1;
+ };
+
+--
+2.45.2
+
diff --git a/media-video/pipewire/files/1.2.0/0002-module-raop-only-set-softVolume-when-valid.patch b/media-video/pipewire/files/1.2.0/0002-module-raop-only-set-softVolume-when-valid.patch
new file mode 100644
index 000000000000..343b42dfdc8a
--- /dev/null
+++ b/media-video/pipewire/files/1.2.0/0002-module-raop-only-set-softVolume-when-valid.patch
@@ -0,0 +1,30 @@
+From 82b9fa118f2fa009b5eb2891378fe003e2573bbe Mon Sep 17 00:00:00 2001
+From: Wim Taymans <wtaymans@redhat.com>
+Date: Mon, 1 Jul 2024 11:27:17 +0200
+Subject: [PATCH 2/5] module-raop: only set softVolume when valid
+
+---
+ src/modules/module-raop-sink.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/src/modules/module-raop-sink.c b/src/modules/module-raop-sink.c
+index 05e467d24..8ad28693f 100644
+--- a/src/modules/module-raop-sink.c
++++ b/src/modules/module-raop-sink.c
+@@ -1643,10 +1643,10 @@ static void stream_props_changed(struct impl *impl, uint32_t id, const struct sp
+ impl->volume = volume;
+
+ rtsp_send_volume(impl);
++ spa_pod_builder_prop(&b, SPA_PROP_softVolumes, 0);
++ spa_pod_builder_array(&b, sizeof(float), SPA_TYPE_Float,
++ n_vols, soft_vols);
+ }
+- spa_pod_builder_prop(&b, SPA_PROP_softVolumes, 0);
+- spa_pod_builder_array(&b, sizeof(float), SPA_TYPE_Float,
+- n_vols, soft_vols);
+ spa_pod_builder_raw_padded(&b, prop, SPA_POD_PROP_SIZE(prop));
+ break;
+ }
+--
+2.45.2
+
diff --git a/media-video/pipewire/files/1.2.0/0003-context-Fix-node-collect-with-groups-and-sync-enable.patch b/media-video/pipewire/files/1.2.0/0003-context-Fix-node-collect-with-groups-and-sync-enable.patch
new file mode 100644
index 000000000000..5e18550bf193
--- /dev/null
+++ b/media-video/pipewire/files/1.2.0/0003-context-Fix-node-collect-with-groups-and-sync-enable.patch
@@ -0,0 +1,149 @@
+From e6c0014f94e995e49b72bea7ae56b960416e6b29 Mon Sep 17 00:00:00 2001
+From: Wim Taymans <wtaymans@redhat.com>
+Date: Mon, 1 Jul 2024 14:50:34 +0200
+Subject: [PATCH 3/5] context: Fix node collect with groups and sync enabled
+
+Keep track of the sync nodes we added to a driver and bring in the other
+nodes from the same sync group, group or link groups. This makes it
+possible to have disjoint sync groups each with their own driver.
+
+Fixes export in ardour8
+
+Fixes #4083
+---
+ src/pipewire/context.c | 49 +++++++++++++++++++++---------------------
+ 1 file changed, 25 insertions(+), 24 deletions(-)
+
+diff --git a/src/pipewire/context.c b/src/pipewire/context.c
+index 686dd5eee..f3e1b4d76 100644
+--- a/src/pipewire/context.c
++++ b/src/pipewire/context.c
+@@ -1163,13 +1163,14 @@ static inline int run_nodes(struct pw_context *context, struct pw_impl_node *nod
+ * This ensures that we only activate the paths from the runnable nodes to the
+ * driver nodes and leave the other nodes idle.
+ */
+-static int collect_nodes(struct pw_context *context, struct pw_impl_node *node, struct spa_list *collect,
+- char **sync)
++static int collect_nodes(struct pw_context *context, struct pw_impl_node *node, struct spa_list *collect)
+ {
+ struct spa_list queue;
+ struct pw_impl_node *n, *t;
+ struct pw_impl_port *p;
+ struct pw_impl_link *l;
++ uint32_t n_sync;
++ char *sync[MAX_SYNC+1];
+
+ pw_log_debug("node %p: '%s'", node, node->name);
+
+@@ -1178,20 +1179,30 @@ static int collect_nodes(struct pw_context *context, struct pw_impl_node *node,
+ spa_list_append(&queue, &node->sort_link);
+ node->visited = true;
+
++ n_sync = 0;
++ sync[0] = NULL;
++
+ /* now follow all the links from the nodes in the queue
+ * and add the peers to the queue. */
+ spa_list_consume(n, &queue, sort_link) {
+ spa_list_remove(&n->sort_link);
+ spa_list_append(collect, &n->sort_link);
+
+- pw_log_debug(" next node %p: '%s' runnable:%u", n, n->name, n->runnable);
++ pw_log_debug(" next node %p: '%s' runnable:%u active:%d",
++ n, n->name, n->runnable, n->active);
+
+ if (!n->active)
+ continue;
+
+- if (sync[0] != NULL) {
+- if (pw_strv_find_common(n->sync_groups, sync) < 0)
+- continue;
++ if (n->sync) {
++ for (uint32_t i = 0; n->sync_groups[i]; i++) {
++ if (n_sync >= MAX_SYNC)
++ break;
++ if (pw_strv_find(sync, n->sync_groups[i]) >= 0)
++ continue;
++ sync[n_sync++] = n->sync_groups[i];
++ sync[n_sync] = NULL;
++ }
+ }
+
+ spa_list_for_each(p, &n->input_ports, link) {
+@@ -1242,6 +1253,8 @@ static int collect_nodes(struct pw_context *context, struct pw_impl_node *node,
+ spa_list_for_each(t, &context->node_list, link) {
+ if (t->exported || !t->active || t->visited)
+ continue;
++ /* the other node will be scheduled with this one if it's in
++ * the same group or link group */
+ if (pw_strv_find_common(t->groups, n->groups) < 0 &&
+ pw_strv_find_common(t->link_groups, n->link_groups) < 0 &&
+ pw_strv_find_common(t->sync_groups, sync) < 0)
+@@ -1253,7 +1266,8 @@ static int collect_nodes(struct pw_context *context, struct pw_impl_node *node,
+ spa_list_append(&queue, &t->sort_link);
+ }
+ }
+- pw_log_debug(" next node %p: '%s' runnable:%u", n, n->name, n->runnable);
++ pw_log_debug(" next node %p: '%s' runnable:%u %p %p %p", n, n->name, n->runnable,
++ n->groups, n->link_groups, sync);
+ }
+ spa_list_for_each(n, collect, sort_link)
+ if (!n->driving && n->runnable) {
+@@ -1497,10 +1511,9 @@ int pw_context_recalc_graph(struct pw_context *context, const char *reason)
+ struct pw_impl_node *n, *s, *target, *fallback;
+ const uint32_t *rates;
+ uint32_t max_quantum, min_quantum, def_quantum, rate_quantum, floor_quantum, ceil_quantum;
+- uint32_t n_rates, def_rate, n_sync;
++ uint32_t n_rates, def_rate;
+ bool freewheel, global_force_rate, global_force_quantum, transport_start;
+ struct spa_list collect;
+- char *sync[MAX_SYNC+1];
+
+ pw_log_info("%p: busy:%d reason:%s", context, impl->recalc, reason);
+
+@@ -1514,23 +1527,11 @@ again:
+ freewheel = false;
+ transport_start = false;
+
+- /* clean up the flags first and collect sync */
+- n_sync = 0;
+- sync[0] = NULL;
++ /* clean up the flags first */
+ spa_list_for_each(n, &context->node_list, link) {
+ n->visited = false;
+ n->checked = 0;
+ n->runnable = n->always_process && n->active;
+- if (n->sync) {
+- for (uint32_t i = 0; n->sync_groups[i]; i++) {
+- if (n_sync >= MAX_SYNC)
+- break;
+- if (pw_strv_find(sync, n->sync_groups[i]) >= 0)
+- continue;
+- sync[n_sync++] = n->sync_groups[i];
+- sync[n_sync] = NULL;
+- }
+- }
+ }
+
+ get_quantums(context, &def_quantum, &min_quantum, &max_quantum, &rate_quantum,
+@@ -1551,7 +1552,7 @@ again:
+
+ if (!n->visited) {
+ spa_list_init(&collect);
+- collect_nodes(context, n, &collect, sync);
++ collect_nodes(context, n, &collect);
+ move_to_driver(context, &collect, n);
+ }
+ /* from now on we are only interested in active driving nodes
+@@ -1605,7 +1606,7 @@ again:
+
+ /* collect all nodes in this group */
+ spa_list_init(&collect);
+- collect_nodes(context, n, &collect, sync);
++ collect_nodes(context, n, &collect);
+
+ driver = NULL;
+ spa_list_for_each(t, &collect, sort_link) {
+--
+2.45.2
+
diff --git a/media-video/pipewire/files/1.2.0/0004-impl-node-disable-async-for-driver-nodes.patch b/media-video/pipewire/files/1.2.0/0004-impl-node-disable-async-for-driver-nodes.patch
new file mode 100644
index 000000000000..a7528249f1e3
--- /dev/null
+++ b/media-video/pipewire/files/1.2.0/0004-impl-node-disable-async-for-driver-nodes.patch
@@ -0,0 +1,112 @@
+From 525360d70ab1698afaaaf20f7e58002b8756353f Mon Sep 17 00:00:00 2001
+From: Wim Taymans <wtaymans@redhat.com>
+Date: Wed, 3 Jul 2024 13:31:24 +0200
+Subject: [PATCH 4/5] impl-node: disable async for driver nodes
+
+Make it so that a driver node can never be scheduled async. It could
+possibly make sense when the driver node is not currently driving the
+graph but when it drives the graph it always needs to be sync. This
+also simplifies the target activation because we can simply check the
+async state and ignore if the node is driving or not.
+
+Also make sure that we never make an async link with a driver output port.
+This does not make sense because the driver node will always be
+triggered sync first and before the async node so we can simply make
+a sync link.
+
+This fixes the modified (only generate 1 buffer) video-src -> video-play
+case where the buffer never arrives in video-play because of the
+useless async link.
+
+Fixes #4092
+---
+ src/pipewire/impl-link.c | 8 +++++---
+ src/pipewire/impl-node.c | 7 +++----
+ src/pipewire/private.h | 1 -
+ 3 files changed, 8 insertions(+), 8 deletions(-)
+
+diff --git a/src/pipewire/impl-link.c b/src/pipewire/impl-link.c
+index 39e9bd17d..6dc2e1a59 100644
+--- a/src/pipewire/impl-link.c
++++ b/src/pipewire/impl-link.c
+@@ -1321,7 +1321,8 @@ struct pw_impl_link *pw_context_create_link(struct pw_context *context,
+ if (this->passive && str == NULL)
+ pw_properties_set(properties, PW_KEY_LINK_PASSIVE, "true");
+
+- impl->async = (output_node->async || input_node->async) &&
++ impl->async = !output_node->driver &&
++ (output_node->async || input_node->async) &&
+ SPA_FLAG_IS_SET(output->flags, PW_IMPL_PORT_FLAG_ASYNC) &&
+ SPA_FLAG_IS_SET(input->flags, PW_IMPL_PORT_FLAG_ASYNC);
+
+@@ -1375,8 +1376,9 @@ struct pw_impl_link *pw_context_create_link(struct pw_context *context,
+ this->name = spa_aprintf("%d.%d.%d -> %d.%d.%d",
+ output_node->info.id, output->port_id, this->rt.out_mix.port.port_id,
+ input_node->info.id, input->port_id, this->rt.in_mix.port.port_id);
+- pw_log_info("(%s) (%s) -> (%s) async:%04x:%04x:%d", this->name, output_node->name,
+- input_node->name, output->flags, input->flags, impl->async);
++ pw_log_info("(%s) (%s) -> (%s) async:%d:%04x:%04x:%d", this->name, output_node->name,
++ input_node->name, output_node->driving,
++ output->flags, input->flags, impl->async);
+
+ pw_impl_port_emit_link_added(output, this);
+ pw_impl_port_emit_link_added(input, this);
+diff --git a/src/pipewire/impl-node.c b/src/pipewire/impl-node.c
+index 4def52897..c75e5793e 100644
+--- a/src/pipewire/impl-node.c
++++ b/src/pipewire/impl-node.c
+@@ -112,13 +112,12 @@ static inline void activate_target(struct pw_impl_node *node, struct pw_node_tar
+ {
+ struct pw_node_activation_state *state = &t->activation->state[0];
+ if (!t->active) {
+- if (!node->async || node->driving) {
++ if (!node->async) {
+ if (!node->exported) {
+ SPA_ATOMIC_INC(state->required);
+ SPA_ATOMIC_INC(state->pending);
+ }
+ }
+- t->active_driving = node->driving;
+ t->active = true;
+ pw_log_debug("%p: target state:%p id:%d pending:%d/%d %d:%d:%d",
+ node, state, t->id, state->pending, state->required,
+@@ -130,7 +129,7 @@ static inline void deactivate_target(struct pw_impl_node *node, struct pw_node_t
+ {
+ if (t->active) {
+ struct pw_node_activation_state *state = &t->activation->state[0];
+- if (!node->async || t->active_driving) {
++ if (!node->async) {
+ /* the driver copies the required to the pending state
+ * so first try to resume the node and then decrement the
+ * required state. This way we either resume with the old value
+@@ -141,7 +140,6 @@ static inline void deactivate_target(struct pw_impl_node *node, struct pw_node_t
+ SPA_ATOMIC_DEC(state->required);
+ }
+ t->active = false;
+- t->active_driving = false;
+ pw_log_debug("%p: target state:%p id:%d pending:%d/%d %d:%d:%d trigger:%"PRIu64,
+ node, state, t->id, state->pending, state->required,
+ node->async, node->driving, node->exported, trigger);
+@@ -1202,6 +1200,7 @@ static void check_properties(struct pw_impl_node *node)
+ recalc_reason = "transport changed";
+ }
+ async = pw_properties_get_bool(node->properties, PW_KEY_NODE_ASYNC, false);
++ async &= !node->driver;
+ if (async != node->async) {
+ pw_log_info("%p: async %d -> %d", node, node->async, async);
+ node->async = async;
+diff --git a/src/pipewire/private.h b/src/pipewire/private.h
+index 25af677ac..8c01fe8d5 100644
+--- a/src/pipewire/private.h
++++ b/src/pipewire/private.h
+@@ -541,7 +541,6 @@ struct pw_node_target {
+ int fd;
+ void (*trigger)(struct pw_node_target *t, uint64_t nsec);
+ unsigned int active:1;
+- unsigned int active_driving:1;
+ unsigned int added:1;
+ };
+
+--
+2.45.2
+
diff --git a/media-video/pipewire/files/1.2.0/0005-impl-node-set-INACTIVE-state-on-server.patch b/media-video/pipewire/files/1.2.0/0005-impl-node-set-INACTIVE-state-on-server.patch
new file mode 100644
index 000000000000..9821c151c97b
--- /dev/null
+++ b/media-video/pipewire/files/1.2.0/0005-impl-node-set-INACTIVE-state-on-server.patch
@@ -0,0 +1,30 @@
+From d08df293a95ce976df1cc8c3ec367a8d5d84db35 Mon Sep 17 00:00:00 2001
+From: Wim Taymans <wtaymans@redhat.com>
+Date: Wed, 3 Jul 2024 17:42:39 +0200
+Subject: [PATCH 5/5] impl-node: set INACTIVE state on server
+
+Don't wait for the client to set the INACTIVE state, do it on the
+server. We already decremented the target required so we don't want to
+schedule the node anymore.
+
+Fixes some xruns when removing nodes in a stress test.
+---
+ src/pipewire/impl-node.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/src/pipewire/impl-node.c b/src/pipewire/impl-node.c
+index c75e5793e..be25aa83e 100644
+--- a/src/pipewire/impl-node.c
++++ b/src/pipewire/impl-node.c
+@@ -221,7 +221,7 @@ do_node_unprepare(struct spa_loop *loop, bool async, uint32_t seq,
+ if (!this->rt.prepared)
+ return 0;
+
+- if (!this->remote || this->rt.target.activation->client_version < 1) {
++ if (!this->exported) {
+ /* We mark ourself as finished now, this will avoid going further into the process loop
+ * in case our fd was ready (removing ourselfs from the loop should avoid that as well).
+ * If we were supposed to be scheduled make sure we continue the graph for the peers we
+--
+2.45.2
+
diff --git a/media-video/pipewire/pipewire-1.2.0-r1.ebuild b/media-video/pipewire/pipewire-1.2.0-r2.ebuild
index f4aeba1f7ea4..f4aeba1f7ea4 100644
--- a/media-video/pipewire/pipewire-1.2.0-r1.ebuild
+++ b/media-video/pipewire/pipewire-1.2.0-r2.ebuild
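Patches 0001 and 0004 revolve around the same bookkeeping rule in `activate_target()`/`deactivate_target()`: whatever condition caused `required`/`pending` to be incremented on activation must also be the condition used to decrement on deactivation, even if the node's `driving` or `async` flags changed in between (patch 0001 latches the decision in `active_driving`; patch 0004 then simplifies it by never letting a driver node be async). The stand-alone C sketch below only illustrates that latch-the-condition pattern; the struct fields, plain `int` counter, and function bodies are simplified stand-ins, not PipeWire's actual code.

```c
/* Illustration of the activate/deactivate counter invariant addressed by
 * patches 0001 and 0004. Names are simplified stand-ins, not PipeWire's API. */
#include <stdbool.h>
#include <stdio.h>

struct state  { int required; };
struct target { bool active; bool active_driving; /* latched, as in patch 0001 */ };
struct node   { bool async; bool driving; };

/* Increment "required" only when the node is scheduled synchronously,
 * and latch that decision on the target. */
static void activate_target(struct node *n, struct target *t, struct state *s)
{
    if (!t->active) {
        if (!n->async || n->driving)
            s->required++;
        t->active_driving = n->driving;   /* remember why we incremented */
        t->active = true;
    }
}

/* Decrement under the latched condition, not the node's *current* flags.
 * Testing n->driving here is the bug fixed by patch 0001: if the node stopped
 * driving in the meantime, "required" would never come back down. */
static void deactivate_target(struct node *n, struct target *t, struct state *s)
{
    if (t->active) {
        if (!n->async || t->active_driving)
            s->required--;
        t->active = false;
        t->active_driving = false;
    }
}

int main(void)
{
    struct state s = { 0 };
    struct node n = { .async = true, .driving = true };
    struct target t = { 0 };

    activate_target(&n, &t, &s);    /* async driver: required -> 1 */
    n.driving = false;              /* driver role moves elsewhere */
    deactivate_target(&n, &t, &s);  /* latched flag still brings it back to 0 */

    printf("required = %d (expected 0)\n", s.required);
    return 0;
}
```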
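Patch 0003's central change is moving the sync-group bookkeeping from `pw_context_recalc_graph()` into `collect_nodes()`, so the `sync` array is rebuilt for each collected driver instead of once for the whole graph — which is what lets disjoint sync groups end up on different drivers. The accumulation loop is small enough to read in isolation; the sketch below mirrors its shape (bounded by `MAX_SYNC`, duplicates skipped) with a hypothetical `strv_find()` helper standing in for `pw_strv_find()`. This is illustrative code under those assumptions, not the library's implementation.

```c
/* Minimal sketch of the per-driver sync-group accumulation that patch 0003
 * moves into collect_nodes(). Helper and constants are illustrative only. */
#include <stdio.h>
#include <string.h>

#define MAX_SYNC 16

/* return the index of str in a NULL-terminated string array, or -1 */
static int strv_find(char **strv, const char *str)
{
    for (int i = 0; strv[i] != NULL; i++)
        if (strcmp(strv[i], str) == 0)
            return i;
    return -1;
}

/* Add one node's sync groups to the set collected for the current driver,
 * skipping duplicates and respecting the MAX_SYNC bound. */
static void add_sync_groups(char **node_sync_groups,
                            char *sync[MAX_SYNC + 1], unsigned *n_sync)
{
    for (unsigned i = 0; node_sync_groups[i] != NULL; i++) {
        if (*n_sync >= MAX_SYNC)
            break;
        if (strv_find(sync, node_sync_groups[i]) >= 0)
            continue;
        sync[(*n_sync)++] = node_sync_groups[i];
        sync[*n_sync] = NULL;           /* keep the array NULL-terminated */
    }
}

int main(void)
{
    char *groups_a[] = { "sync-a", NULL };
    char *groups_b[] = { "sync-a", "sync-b", NULL };
    char *sync[MAX_SYNC + 1] = { NULL };
    unsigned n_sync = 0;

    add_sync_groups(groups_a, sync, &n_sync);
    add_sync_groups(groups_b, sync, &n_sync);  /* "sync-a" is deduplicated */

    for (unsigned i = 0; i < n_sync; i++)
        printf("sync[%u] = %s\n", i, sync[i]);
    return 0;
}
```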