Skip to content
Open
Show file tree
Hide file tree
Changes from 2 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
Optimize :meth:`set.intersection_update` by replacing the general-purpose
``set_swap_bodies()`` with a specialized ``set_replace_body()`` that skips
unnecessary atomic operations and checks for the temporary set argument.
102 changes: 48 additions & 54 deletions Objects/setobject.c
Original file line number Diff line number Diff line change
Expand Up @@ -1475,74 +1475,68 @@ copy_small_table(setentry *dest, setentry *src)
}
#endif

/* set_swap_bodies() switches the contents of any two sets by moving their
internal data pointers and, if needed, copying the internal smalltables.
Semantically equivalent to:
/* set_replace_body() replaces the contents of dst with those of src,
moving dst's old contents into src for proper cleanup on Py_DECREF.

t=set(a); a.clear(); a.update(b); b.clear(); b.update(t); del t
The caller guarantees that src is a uniquely-referenced temporary set
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

If the caller is required to clean up src immediately afterwards, maybe it is cleaner to let set_replace_body steal the reference and do the decref.

Copy link
Copy Markdown
Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

In set_intersection_update_multi_impl, I placed Py_DECREF(tmp) after Py_END_CRITICAL_SECTION().

My reasoning was that moving the decref inside set_replace_body would run the deallocation (and decref of all keys) while still holding the critical section lock, which I thought might be undesirable.

By keeping it outside, the caller has control over when deallocation happens.

This is my first PR though and I'm still getting familiar with the project, so if I'm wrong here - happy to rework it.

that will be discarded immediately afterward. This allows us to skip
atomic operations and shared-marking on src's fields, and to skip the
frozenset hash swap (neither argument is ever a frozenset here).

The function always succeeds and it leaves both objects in a stable state.
Useful for operations that update in-place (by allowing an intermediate
result to be swapped into one of the original inputs).
*/

/* set_replace_body() replaces the contents of dst with those of src,
   moving dst's old contents into src for proper cleanup on the caller's
   subsequent Py_DECREF(src).

   The caller guarantees that src is a uniquely-referenced temporary set
   that will be discarded immediately afterward.  This allows us to skip
   atomic operations and shared-marking on src's fields, and to skip the
   frozenset hash swap (neither argument is ever a frozenset here).

   The function always succeeds and it leaves both objects in a stable
   state. */

static void
set_replace_body(PySetObject *dst, PySetObject *src)
{
    Py_ssize_t t;
    setentry *u;
    setentry tab[PySet_MINSIZE];

    assert(!PyType_IsSubtype(Py_TYPE(dst), &PyFrozenSet_Type));
    assert(!PyType_IsSubtype(Py_TYPE(src), &PyFrozenSet_Type));
    /* A raw refcount comparison is not a reliable uniqueness check on
       free-threaded builds; use the dedicated helper instead. */
    assert(_PyObject_IsUniquelyReferenced((PyObject *)src));

    /* Detach both tables.  dst may have concurrent lock-free readers, so
       its store needs release ordering; src is private to this thread and
       can be written plainly. */
    setentry *dst_table = dst->table;
    setentry *src_table = src->table;
    FT_ATOMIC_STORE_PTR_RELEASE(dst->table, NULL);
    src->table = NULL;

    /* Swap fill/used/mask.  Only dst's side needs atomic stores. */
    t = dst->fill; dst->fill = src->fill; src->fill = t;
    t = dst->used;
    FT_ATOMIC_STORE_SSIZE_RELAXED(dst->used, src->used);
    src->used = t;
    t = dst->mask;
    FT_ATOMIC_STORE_SSIZE_RELEASE(dst->mask, src->mask);
    src->mask = t;

    /* If either table pointer refers to its object's inline smalltable,
       the exchanged pointers must be redirected to the *other* object's
       smalltable (the inline storage cannot move between objects). */
    u = dst_table;
    if (dst_table == dst->smalltable)
        u = src->smalltable;
    dst_table = src_table;
    if (src_table == src->smalltable)
        dst_table = dst->smalltable;
    src_table = u;

    if (dst_table == dst->smalltable || src_table == src->smalltable) {
        memcpy(tab, dst->smalltable, sizeof(tab));
#ifndef Py_GIL_DISABLED
        memcpy(dst->smalltable, src->smalltable, sizeof(tab));
        memcpy(src->smalltable, tab, sizeof(tab));
#else
        /* dst's smalltable may be read concurrently, so copy it with the
           atomics-aware helper.  src's smalltable is private here (src is
           a uniquely-referenced temporary), so a plain memcpy suffices —
           this is part of the optimization over set_swap_bodies(). */
        copy_small_table(dst->smalltable, src->smalltable);
        memcpy(src->smalltable, tab, sizeof(tab));
#endif
    }

    /* Neither argument is ever a frozenset here, so no hash swap is
       needed: just invalidate dst's cached hash.  src's hash dies with
       src in the caller's Py_DECREF. */
    FT_ATOMIC_STORE_SSIZE_RELAXED(dst->hash, -1);

    /* dst's old table is being handed to src for deallocation; if dst was
       shared across threads, src must be marked shared so that table is
       reclaimed with the shared-object path. */
    if (SET_IS_SHARED(dst)) {
        SET_MARK_SHARED(src);
    }

    /* Publish the new tables.  dst's release store pairs with acquire
       loads performed by concurrent readers; src needs no ordering. */
    FT_ATOMIC_STORE_PTR_RELEASE(dst->table, dst_table);
    src->table = src_table;
}

/*[clinic input]
Expand Down Expand Up @@ -1797,7 +1791,7 @@ set_intersection_update(PySetObject *so, PyObject *other)
tmp = set_intersection(so, other);
if (tmp == NULL)
return NULL;
set_swap_bodies(so, (PySetObject *)tmp);
set_replace_body(so, (PySetObject *)tmp);
Py_DECREF(tmp);
Py_RETURN_NONE;
}
Expand All @@ -1821,7 +1815,7 @@ set_intersection_update_multi_impl(PySetObject *so, PyObject * const *others,
if (tmp == NULL)
return NULL;
Py_BEGIN_CRITICAL_SECTION(so);
set_swap_bodies(so, (PySetObject *)tmp);
set_replace_body(so, (PySetObject *)tmp);
Py_END_CRITICAL_SECTION();
Py_DECREF(tmp);
Py_RETURN_NONE;
Expand Down
Loading