drgn.helpers.linux: add proper XArray helpers
Commit 89eb868e95 ("helpers: make find_task() work on recent kernels") made radix_tree_lookup() and radix_tree_for_each() work for basic XArrays. However, it doesn't handle a couple of more advanced features: multi-index entries (which old radix trees actually also supported) and zero entries. It has also been really confusing to explain to people unfamiliar with the radix tree -> XArray transition that they should use helpers named radix_tree for a structure named xarray. So, let's finally add xa_load(), xa_for_each(), and some additional auxiliary helpers.

The non-recursive xa_for_each() implementation is based on Kevin Svetlitski's C implementation from commit 2b47583c73 ("Rewrite linux helper iterators in C"). radix_tree_lookup() and radix_tree_for_each() share the implementation with xa_load() and xa_for_each(), respectively, so they are mostly interchangeable.

Fixes: #61
Signed-off-by: Omar Sandoval <osandov@osandov.com>
Parent: 3c76c12ef1
Commit: 7ce84a3f1f
_drgn.pyi (11 lines changed)
@@ -2306,16 +2306,7 @@ def _linux_helper_direct_mapping_offset(prog: Program) -> int: ...
 def _linux_helper_read_vm(
     prog: Program, pgtable: Object, address: IntegerLike, size: IntegerLike
 ) -> bytes: ...
-def _linux_helper_radix_tree_lookup(root: Object, index: IntegerLike) -> Object:
-    """
-    Look up the entry at a given index in a radix tree.
-
-    :param root: ``struct radix_tree_root *``
-    :param index: Entry index.
-    :return: ``void *`` found entry, or ``NULL`` if not found.
-    """
-    ...
-
+def _linux_helper_xa_load(xa: Object, index: IntegerLike) -> Object: ...
 def _linux_helper_per_cpu_ptr(ptr: Object, cpu: IntegerLike) -> Object:
    """
    Return the per-CPU pointer for a given CPU.
drgn/helpers/linux/radixtree.py

@@ -7,36 +7,33 @@ Radix Trees
 
 The ``drgn.helpers.linux.radixtree`` module provides helpers for working with
 radix trees from :linux:`include/linux/radix-tree.h`.
+
+.. seealso::
+
+    `XArrays`_, which were introduced in Linux 4.20 as a replacement for radix
+    trees.
 """
 
 from typing import Iterator, Tuple
 
-from _drgn import _linux_helper_radix_tree_lookup as radix_tree_lookup
-from drgn import Object, cast
+from drgn import IntegerLike, Object
+from drgn.helpers.linux.xarray import xa_for_each, xa_load
 
 __all__ = (
     "radix_tree_for_each",
     "radix_tree_lookup",
 )
 
-_RADIX_TREE_ENTRY_MASK = 3
-
 
-def _is_internal_node(node: Object, internal_node: int) -> bool:
-    return (node.value_() & _RADIX_TREE_ENTRY_MASK) == internal_node
-
-
-def _entry_to_node(node: Object, internal_node: int) -> Object:
-    return Object(node.prog_, node.type_, value=node.value_() & ~internal_node)
-
-
-def _radix_tree_root_node(root: Object) -> Tuple[Object, int]:
-    try:
-        node = root.xa_head
-    except AttributeError:
-        return root.rnode.read_(), 1
-    else:
-        return cast("struct xa_node *", node).read_(), 2
+def radix_tree_lookup(root: Object, index: IntegerLike) -> Object:
+    """
+    Look up the entry at a given index in a radix tree.
+
+    :param root: ``struct radix_tree_root *``
+    :param index: Entry index.
+    :return: ``void *`` found entry, or ``NULL`` if not found.
+    """
+    return xa_load(root, index)
 
 
 def radix_tree_for_each(root: Object) -> Iterator[Tuple[int, Object]]:
@@ -46,17 +43,4 @@ def radix_tree_for_each(root: Object) -> Iterator[Tuple[int, Object]]:
     :param root: ``struct radix_tree_root *``
     :return: Iterator of (index, ``void *``) tuples.
     """
-    node, RADIX_TREE_INTERNAL_NODE = _radix_tree_root_node(root)
-
-    def aux(node: Object, index: int) -> Iterator[Tuple[int, Object]]:
-        if _is_internal_node(node, RADIX_TREE_INTERNAL_NODE):
-            parent = _entry_to_node(node, RADIX_TREE_INTERNAL_NODE)
-            for i, slot in enumerate(parent.slots):
-                yield from aux(
-                    cast(parent.type_, slot).read_(),
-                    index + (i << parent.shift.value_()),
-                )
-        elif node:
-            yield index, cast("void *", node)
-
-    yield from aux(node, 0)
+    return xa_for_each(root)
drgn/helpers/linux/xarray.py (new file, 239 lines)
@@ -0,0 +1,239 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# SPDX-License-Identifier: LGPL-2.1-or-later

"""
XArrays
-------

The ``drgn.helpers.linux.xarray`` module provides helpers for working with the
`XArray <https://docs.kernel.org/core-api/xarray.html>`_ data structure from
:linux:`include/linux/xarray.h`.

.. note::

    XArrays were introduced in Linux 4.20 as a replacement for `radix trees`_.
    To make it easier to work with data structures that were changed from a
    radix tree to an XArray (like ``struct address_space::i_pages``), drgn
    treats XArrays and radix trees interchangeably in some cases.

    Specifically, :func:`~drgn.helpers.linux.xarray.xa_load()` is equivalent to
    :func:`~drgn.helpers.linux.radixtree.radix_tree_lookup()`, and
    :func:`~drgn.helpers.linux.xarray.xa_for_each()` is equivalent to
    :func:`~drgn.helpers.linux.radixtree.radix_tree_for_each()`, except that
    the radix tree helpers assume ``advanced=False``. (Therefore,
    :func:`~drgn.helpers.linux.xarray.xa_load()` and
    :func:`~drgn.helpers.linux.xarray.xa_for_each()` also accept a ``struct
    radix_tree_root *``, and
    :func:`~drgn.helpers.linux.radixtree.radix_tree_lookup()` and
    :func:`~drgn.helpers.linux.radixtree.radix_tree_for_each()` also accept a
    ``struct xarray *``.)
"""

from typing import Iterator, Optional, Tuple

from _drgn import _linux_helper_xa_load
from drgn import NULL, IntegerLike, Object, cast

__all__ = (
    "xa_for_each",
    "xa_is_value",
    "xa_is_zero",
    "xa_load",
    "xa_to_value",
)


_XA_ZERO_ENTRY = 1030  # xa_mk_internal(257)


def xa_load(xa: Object, index: IntegerLike, *, advanced: bool = False) -> Object:
    """
    Look up the entry at a given index in an XArray.

    >>> entry = xa_load(inode.i_mapping.i_pages.address_of_(), 2)
    >>> cast("struct page *", entry)
    *(struct page *)0xffffed6980306f40 = {
        ...
    }

    :param xa: ``struct xarray *``
    :param index: Entry index.
    :param advanced: Whether to return nodes only visible to the XArray
        advanced API. If ``False``, zero entries (see :func:`xa_is_zero()`)
        will be returned as ``NULL``.
    :return: ``void *`` found entry, or ``NULL`` if not found.
    """
    entry = _linux_helper_xa_load(xa, index)
    if not advanced and entry.value_() == _XA_ZERO_ENTRY:
        return NULL(xa.prog_, "void *")
    return entry


class _XAIteratorNode:
    def __init__(self, node: Object, index: int) -> None:
        self.slots = node.slots
        self.shift = node.shift.value_()
        self.index = index
        self.next_slot = 0


def xa_for_each(xa: Object, *, advanced: bool = False) -> Iterator[Tuple[int, Object]]:
    """
    Iterate over all of the entries in an XArray.

    >>> for index, entry in xa_for_each(inode.i_mapping.i_pages.address_of_()):
    ...     print(index, entry)
    ...
    0 (void *)0xffffed6980356140
    1 (void *)0xffffed6980306f80
    2 (void *)0xffffed6980306f40
    3 (void *)0xffffed6980355b40

    :param xa: ``struct xarray *``
    :param advanced: Whether to return nodes only visible to the XArray
        advanced API. If ``False``, zero entries (see :func:`xa_is_zero()`)
        will be skipped.
    :return: Iterator of (index, ``void *``) tuples.
    """
    prog = xa.prog_

    def should_yield(entry_value: int) -> bool:
        return entry_value != 0

    # This handles three cases:
    #
    # 1. XArrays.
    # 2. Radix trees since Linux kernel commit f8d5d0cc145c ("xarray: Add
    #    definition of struct xarray") (in v4.20) redefined them in terms of
    #    XArrays. These reuse the XArray structures and are close enough to
    #    case 1 that the same code handles both.
    # 3. Radix trees before that commit. These are similar to cases 1 and 2,
    #    but they have different type and member names, use different flags in
    #    the lower bits (see Linux kernel commit 3159f943aafd ("xarray: Replace
    #    exceptional entries") (in v4.20)), and represent sibling entries
    #    differently (see Linux kernel commit 02c02bf12c5d ("xarray: Change
    #    definition of sibling entries") (in v4.20)).
    try:
        entry = xa.xa_head.read_()
    except AttributeError:
        entry = xa.rnode
        node_type = entry.type_
        entry = cast("void *", entry)

        # Return > 0 if radix_tree_is_internal_node(), < 0 if
        # is_sibling_entry(), and 0 otherwise.
        def is_internal(slots: Optional[Object], entry_value: int) -> int:
            if (entry_value & 3) == 1:
                # slots must be a reference object, so address_ is never None.
                if slots is not None and (
                    slots.address_ <= entry_value < slots[len(slots)].address_  # type: ignore[operator]
                ):
                    return -1
                else:
                    return 1
            return 0

        # entry_to_node()
        def to_node(entry_value: int) -> Object:
            return Object(prog, node_type, entry_value - 1)

    else:
        node_type = prog.type("struct xa_node *")

        # Return > 0 if xa_is_node(), < 0 if xa_is_sibling(), and 0 otherwise.
        def is_internal(slots: Optional[Object], entry_value: int) -> int:
            if (entry_value & 3) == 2:
                if entry_value > 4096:
                    return 1
                elif entry_value < 256:
                    return -1
            return 0

        # xa_to_node()
        def to_node(entry_value: int) -> Object:
            return Object(prog, node_type, entry_value - 2)

    if not advanced:
        # We're intentionally redefining should_yield() for this case.
        def should_yield(entry_value: int) -> bool:  # noqa: F811
            return entry_value != 0 and entry_value != _XA_ZERO_ENTRY

    entry_value = entry.value_()
    internal = is_internal(None, entry_value)
    if internal > 0:
        stack = [_XAIteratorNode(to_node(entry_value), 0)]
    else:
        if internal == 0 and should_yield(entry_value):
            yield 0, entry
        return

    while stack:
        node = stack[-1]
        if node.next_slot >= len(node.slots):
            stack.pop()
            continue

        entry = node.slots[node.next_slot].read_()
        entry_value = entry.value_()

        index = node.index + (node.next_slot << node.shift)
        node.next_slot += 1

        internal = is_internal(node.slots, entry_value)
        if internal > 0:
            stack.append(_XAIteratorNode(to_node(entry_value), index))
        elif internal == 0 and should_yield(entry_value):
            yield index, entry


def xa_is_value(entry: Object) -> bool:
    """
    Return whether an XArray entry is a value.

    See :func:`xa_to_value()`.

    :param entry: ``void *``
    """
    return (entry.value_() & 1) != 0


def xa_to_value(entry: Object) -> Object:
    """
    Return the value in an XArray entry.

    In addition to pointers, XArrays can store integers between 0 and
    ``LONG_MAX``. If :func:`xa_is_value()` returns ``True``, use this to get
    the stored integer.

    >>> entry = xa_load(xa, 9)
    >>> entry
    (void *)0xc9
    >>> xa_is_value(entry)
    True
    >>> xa_to_value(entry)
    100

    :param entry: ``void *``
    :return: ``unsigned long``
    """
    return cast("unsigned long", entry) >> 1


def xa_is_zero(entry: Object) -> bool:
    """
    Return whether an XArray entry is a "zero" entry.

    A zero entry is an entry that was reserved but is not present. These are
    only visible to the XArray advanced API, so they are only returned by
    :func:`xa_load()` and :func:`xa_for_each()` when ``advanced = True``.

    >>> entry = xa_load(xa, 10, advanced=True)
    >>> entry
    (void *)0x406
    >>> xa_is_zero(entry)
    True
    >>> xa_load(xa, 10)
    (void *)0

    :param entry: ``void *``
    """
    return entry.value_() == _XA_ZERO_ENTRY
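The constants above follow the XArray entry tagging scheme from include/linux/xarray.h: value entries tag an integer as ``(value << 1) | 1``, internal entries tag a value as ``(value << 2) | 2``, and ``XA_ZERO_ENTRY`` is ``xa_mk_internal(257)``. A minimal sketch of that arithmetic (illustrative only, not part of this commit):

    # Tagging rules assumed from include/linux/xarray.h.
    def xa_mk_value(value: int) -> int:
        # Value entries store an integer shifted left by one with the low bit set.
        return (value << 1) | 1

    def xa_mk_internal(value: int) -> int:
        # Internal entries have the low two bits set to 0b10.
        return (value << 2) | 2

    assert xa_mk_value(100) == 0xC9      # matches the xa_to_value() doctest above
    assert xa_mk_internal(257) == 1030   # matches _XA_ZERO_ENTRY
    # xa_for_each() above treats internal entries greater than 4096 as node
    # pointers and internal entries less than 256 as sibling slots.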
@@ -39,8 +39,8 @@ struct drgn_error *linux_helper_task_cpu(const struct drgn_object *task,
                                          uint64_t *ret);
 
 struct drgn_error *
-linux_helper_radix_tree_lookup(struct drgn_object *res,
-                               const struct drgn_object *root, uint64_t index);
+linux_helper_xa_load(struct drgn_object *res, const struct drgn_object *xa,
+                     uint64_t index);
 
 struct drgn_error *linux_helper_idr_find(struct drgn_object *res,
                                          const struct drgn_object *idr,
@@ -312,51 +312,61 @@ out:
 }
 
 struct drgn_error *
-linux_helper_radix_tree_lookup(struct drgn_object *res,
-                               const struct drgn_object *root, uint64_t index)
+linux_helper_xa_load(struct drgn_object *res,
+                     const struct drgn_object *xa, uint64_t index)
 {
     struct drgn_error *err;
-    static const uint64_t RADIX_TREE_ENTRY_MASK = 3;
-    uint64_t RADIX_TREE_INTERNAL_NODE;
-    uint64_t RADIX_TREE_MAP_MASK;
-    struct drgn_object node, tmp;
-    struct drgn_qualified_type node_type;
+
+    struct drgn_object entry, node, tmp;
+    struct drgn_qualified_type node_type;
+    uint64_t internal_flag, node_min;
 
+    drgn_object_init(&entry, drgn_object_program(res));
     drgn_object_init(&node, drgn_object_program(res));
     drgn_object_init(&tmp, drgn_object_program(res));
 
-    /* node = root->xa_head */
-    err = drgn_object_member_dereference(&node, root, "xa_head");
+    // See xa_for_each() in drgn/helpers/linux/xarray.py for a description
+    // of the cases we have to handle.
+    // entry = xa->xa_head
+    err = drgn_object_member_dereference(&entry, xa, "xa_head");
     if (!err) {
+        err = drgn_object_read(&entry, &entry);
+        if (err)
+            goto out;
+        // node_type = struct xa_node *
         err = drgn_program_find_type(drgn_object_program(res),
                                      "struct xa_node *", NULL,
                                      &node_type);
         if (err)
             goto out;
-        RADIX_TREE_INTERNAL_NODE = 2;
+        internal_flag = 2;
+        node_min = 4097;
     } else if (err->code == DRGN_ERROR_LOOKUP) {
         drgn_error_destroy(err);
-        /* node = (void *)root.rnode */
-        err = drgn_object_member_dereference(&node, root, "rnode");
+        // entry = (void *)xa->rnode
+        err = drgn_object_member_dereference(&entry, xa, "rnode");
         if (err)
             goto out;
+        // node_type = typeof(xa->rnode)
+        node_type = drgn_object_qualified_type(&entry);
+        struct drgn_qualified_type voidp_type;
         err = drgn_program_find_type(drgn_object_program(res), "void *",
-                                     NULL, &node_type);
+                                     NULL, &voidp_type);
         if (err)
             goto out;
-        err = drgn_object_cast(&node, node_type, &node);
+        err = drgn_object_cast(&entry, voidp_type, &entry);
         if (err)
             goto out;
-        err = drgn_program_find_type(drgn_object_program(res),
-                                     "struct radix_tree_node *", NULL,
-                                     &node_type);
-        if (err)
-            goto out;
-        RADIX_TREE_INTERNAL_NODE = 1;
+        internal_flag = 1;
+        node_min = 0;
     } else {
         goto out;
     }
 
+    // xa_is_node() or radix_tree_is_internal_node()
+#define is_node(entry_value) \
+    (((entry_value) & 3) == internal_flag && (entry_value) >= node_min)
+
     struct drgn_type_member *member;
     uint64_t member_bit_offset;
     err = drgn_type_find_member(drgn_type_type(node_type.type).type,
@@ -369,52 +379,151 @@ linux_helper_radix_tree_lookup(struct drgn_object *res,
         goto out;
     if (drgn_type_kind(member_type.type) != DRGN_TYPE_ARRAY) {
         err = drgn_error_create(DRGN_ERROR_TYPE,
-                                "struct radix_tree_node slots member is not an array");
+                                "struct xa_node slots member is not an array");
         goto out;
     }
-    RADIX_TREE_MAP_MASK = drgn_type_length(member_type.type) - 1;
-
-    for (;;) {
-        uint64_t value;
-        union drgn_value shift;
-        uint64_t offset;
-
-        err = drgn_object_read(&node, &node);
-        if (err)
-            goto out;
-        err = drgn_object_read_unsigned(&node, &value);
-        if (err)
-            goto out;
-        if ((value & RADIX_TREE_ENTRY_MASK) != RADIX_TREE_INTERNAL_NODE)
-            break;
-        err = drgn_object_set_unsigned(&node, node_type,
-                                       value & ~RADIX_TREE_INTERNAL_NODE,
-                                       0);
-        if (err)
-            goto out;
-        err = drgn_object_member_dereference(&tmp, &node, "shift");
-        if (err)
-            goto out;
-        err = drgn_object_read_integer(&tmp, &shift);
-        if (err)
-            goto out;
-        if (shift.uvalue >= 64)
-            offset = 0;
-        else
-            offset = (index >> shift.uvalue) & RADIX_TREE_MAP_MASK;
-        err = drgn_object_member_dereference(&tmp, &node, "slots");
-        if (err)
-            goto out;
-        err = drgn_object_subscript(&node, &tmp, offset);
-        if (err)
-            goto out;
-    }
-
-    err = drgn_object_copy(res, &node);
+    uint64_t XA_CHUNK_MASK = drgn_type_length(member_type.type) - 1;
+    uint64_t sizeof_slots;
+    if (node_min == 0) { // !xarray
+        err = drgn_type_sizeof(member_type.type, &sizeof_slots);
+        if (err)
+            goto out;
+    }
+
+    uint64_t entry_value;
+    err = drgn_object_read_unsigned(&entry, &entry_value);
+    if (err)
+        goto out;
+    if (is_node(entry_value)) {
+        // node = xa_to_node(entry)
+        // or
+        // node = entry_to_node(entry)
+        err = drgn_object_set_unsigned(&node, node_type,
+                                       entry_value - internal_flag, 0);
+        if (err)
+            goto out;
+        // node_shift = node->shift
+        err = drgn_object_member_dereference(&tmp, &node, "shift");
+        if (err)
+            goto out;
+        union drgn_value node_shift;
+        err = drgn_object_read_integer(&tmp, &node_shift);
+        if (err)
+            goto out;
+
+        uint64_t offset;
+        if (node_shift.uvalue >= 64) // Avoid undefined behavior.
+            offset = 0;
+        else
+            offset = index >> node_shift.uvalue;
+        if (offset > XA_CHUNK_MASK)
+            goto null;
+
+        for (;;) {
+            // entry = node->slots[offset]
+            err = drgn_object_member_dereference(&tmp, &node,
+                                                 "slots");
+            if (err)
+                goto out;
+            err = drgn_object_subscript(&entry, &tmp, offset);
+            if (err)
+                goto out;
+            err = drgn_object_read(&entry, &entry);
+            if (err)
+                goto out;
+            err = drgn_object_read_unsigned(&entry, &entry_value);
+            if (err)
+                goto out;
+
+            if ((entry_value & 3) == internal_flag) {
+                if (node_min != 0 && // xarray
+                    entry_value < 256) { // xa_is_sibling()
+                    // entry = node->slots[xa_to_sibling(entry)]
+                    err = drgn_object_subscript(&entry,
+                                                &tmp,
+                                                entry_value >> 2);
+                    if (err)
+                        goto out;
+                    err = drgn_object_read(&entry, &entry);
+                    if (err)
+                        goto out;
+                    err = drgn_object_read_unsigned(&entry,
+                                                    &entry_value);
+                    if (err)
+                        goto out;
+                } else if (node_min == 0 && // !xarray
+                           tmp.address <= entry_value &&
+                           entry_value < tmp.address + sizeof_slots) { // is_sibling_entry()
+                    // entry = *(void **)entry_to_node(entry)
+                    struct drgn_qualified_type voidpp_type;
+                    err = drgn_program_find_type(drgn_object_program(res),
+                                                 "void **",
+                                                 NULL,
+                                                 &voidpp_type);
+                    if (err)
+                        goto out;
+                    err = drgn_object_set_unsigned(&entry,
+                                                   voidpp_type,
+                                                   entry_value - 1,
+                                                   0);
+                    if (err)
+                        goto out;
+                    err = drgn_object_dereference(&entry,
+                                                  &entry);
+                    if (err)
+                        goto out;
+                    err = drgn_object_read(&entry, &entry);
+                    if (err)
+                        goto out;
+                    err = drgn_object_read_unsigned(&entry,
+                                                    &entry_value);
+                    if (err)
+                        goto out;
+                }
+            }
+
+            if (node_shift.uvalue == 0 || !is_node(entry_value))
+                break;
+
+            // node = xa_to_node(entry)
+            // or
+            // node = entry_to_node(entry)
+            err = drgn_object_set_unsigned(&node, node_type,
+                                           entry_value - internal_flag,
+                                           0);
+            if (err)
+                goto out;
+            // node_shift = node->shift
+            err = drgn_object_member_dereference(&tmp, &node,
+                                                 "shift");
+            if (err)
+                goto out;
+            err = drgn_object_read_integer(&tmp, &node_shift);
+            if (err)
+                goto out;
+
+            if (node_shift.uvalue >= 64) // Avoid undefined behavior.
+                offset = 0;
+            else
+                offset = (index >> node_shift.uvalue) & XA_CHUNK_MASK;
+        }
+    } else if (index) {
+        goto null;
+    }
+
+    err = drgn_object_copy(res, &entry);
 out:
     drgn_object_deinit(&tmp);
     drgn_object_deinit(&node);
+    drgn_object_deinit(&entry);
     return err;
+
+null:
+    err = drgn_object_set_unsigned(res, drgn_object_qualified_type(&entry),
+                                   0, 0);
+    goto out;
+
+#undef is_node
 }
 
 struct drgn_error *linux_helper_idr_find(struct drgn_object *res,
@@ -449,7 +558,7 @@ struct drgn_error *linux_helper_idr_find(struct drgn_object *res,
     err = drgn_object_address_of(&tmp, &tmp);
     if (err)
         goto out;
-    err = linux_helper_radix_tree_lookup(res, &tmp, id);
+    err = linux_helper_xa_load(res, &tmp, id);
 out:
     drgn_object_deinit(&tmp);
     return err;
@@ -335,8 +335,7 @@ DrgnObject *drgnpy_linux_helper_idle_task(PyObject *self, PyObject *args,
                                           PyObject *kwds);
 PyObject *drgnpy_linux_helper_task_cpu(PyObject *self, PyObject *args,
                                        PyObject *kwds);
-DrgnObject *drgnpy_linux_helper_radix_tree_lookup(PyObject *self,
-                                                  PyObject *args,
+DrgnObject *drgnpy_linux_helper_xa_load(PyObject *self, PyObject *args,
                                         PyObject *kwds);
 DrgnObject *drgnpy_linux_helper_idr_find(PyObject *self, PyObject *args,
                                          PyObject *kwds);
@@ -117,26 +117,22 @@ PyObject *drgnpy_linux_helper_task_cpu(PyObject *self, PyObject *args,
     return PyLong_FromUint64(cpu);
 }
 
-DrgnObject *drgnpy_linux_helper_radix_tree_lookup(PyObject *self,
-                                                  PyObject *args,
+DrgnObject *drgnpy_linux_helper_xa_load(PyObject *self, PyObject *args,
                                         PyObject *kwds)
 {
-    static char *keywords[] = {"root", "index", NULL};
+    static char *keywords[] = {"xa", "index", NULL};
     struct drgn_error *err;
-    DrgnObject *root;
+    DrgnObject *xa;
     struct index_arg index = {};
-    DrgnObject *res;
 
-    if (!PyArg_ParseTupleAndKeywords(args, kwds, "O!O&:radix_tree_lookup",
-                                     keywords, &DrgnObject_type, &root,
-                                     index_converter, &index))
+    if (!PyArg_ParseTupleAndKeywords(args, kwds, "O!O&:xa_load", keywords,
+                                     &DrgnObject_type, &xa, index_converter,
+                                     &index))
         return NULL;
 
-    res = DrgnObject_alloc(DrgnObject_prog(root));
+    DrgnObject *res = DrgnObject_alloc(DrgnObject_prog(xa));
     if (!res)
         return NULL;
-    err = linux_helper_radix_tree_lookup(&res->obj, &root->obj,
-                                         index.uvalue);
+    err = linux_helper_xa_load(&res->obj, &xa->obj, index.uvalue);
     if (err) {
         Py_DECREF(res);
         return set_drgn_error(err);
@@ -132,8 +132,8 @@ static PyMethodDef drgn_methods[] = {
      METH_VARARGS | METH_KEYWORDS},
     {"_linux_helper_task_cpu", (PyCFunction)drgnpy_linux_helper_task_cpu,
      METH_VARARGS | METH_KEYWORDS},
-    {"_linux_helper_radix_tree_lookup",
-     (PyCFunction)drgnpy_linux_helper_radix_tree_lookup,
+    {"_linux_helper_xa_load",
+     (PyCFunction)drgnpy_linux_helper_xa_load,
      METH_VARARGS | METH_KEYWORDS},
     {"_linux_helper_idr_find", (PyCFunction)drgnpy_linux_helper_idr_find,
      METH_VARARGS | METH_KEYWORDS},
tests/linux_kernel/helpers/test_radixtree.py (new file, 122 lines)
@@ -0,0 +1,122 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# SPDX-License-Identifier: LGPL-2.1-or-later

from drgn import NULL, Object
from drgn.helpers.linux.radixtree import radix_tree_for_each, radix_tree_lookup
from tests.linux_kernel import LinuxKernelTestCase, skip_unless_have_test_kmod


@skip_unless_have_test_kmod
class TestRadixTree(LinuxKernelTestCase):
    def test_radix_tree_lookup_empty(self):
        root = self.prog["drgn_test_radix_tree_empty"].address_of_()
        self.assertIdentical(radix_tree_lookup(root, 0), NULL(self.prog, "void *"))
        self.assertIdentical(radix_tree_lookup(root, 100000), NULL(self.prog, "void *"))

    def test_radix_tree_for_each_empty(self):
        root = self.prog["drgn_test_radix_tree_empty"].address_of_()
        self.assertIdentical(list(radix_tree_for_each(root)), [])

    def test_radix_tree_lookup_one(self):
        root = self.prog["drgn_test_radix_tree_one"].address_of_()
        self.assertIdentical(radix_tree_lookup(root, 0), NULL(self.prog, "void *"))
        self.assertIdentical(radix_tree_lookup(root, 665), NULL(self.prog, "void *"))
        self.assertIdentical(
            radix_tree_lookup(root, 666), Object(self.prog, "void *", 0xDEADB00)
        )
        self.assertIdentical(radix_tree_lookup(root, 667), NULL(self.prog, "void *"))
        self.assertIdentical(radix_tree_lookup(root, 100000), NULL(self.prog, "void *"))

    def test_radix_tree_for_each_one(self):
        root = self.prog["drgn_test_radix_tree_one"].address_of_()
        self.assertIdentical(
            list(radix_tree_for_each(root)),
            [(666, Object(self.prog, "void *", 0xDEADB00))],
        )

    def test_radix_tree_lookup_one_at_zero(self):
        root = self.prog["drgn_test_radix_tree_one_at_zero"].address_of_()
        self.assertIdentical(
            radix_tree_lookup(root, 0), Object(self.prog, "void *", 0x1234)
        )
        self.assertIdentical(radix_tree_lookup(root, 1), NULL(self.prog, "void *"))
        self.assertIdentical(radix_tree_lookup(root, 100000), NULL(self.prog, "void *"))

    def test_radix_tree_for_each_one_at_zero(self):
        root = self.prog["drgn_test_radix_tree_one_at_zero"].address_of_()
        self.assertIdentical(
            list(radix_tree_for_each(root)), [(0, Object(self.prog, "void *", 0x1234))]
        )

    def test_radix_tree_lookup_sparse(self):
        root = self.prog["drgn_test_radix_tree_sparse"].address_of_()
        self.assertIdentical(radix_tree_lookup(root, 0), NULL(self.prog, "void *"))
        self.assertIdentical(
            radix_tree_lookup(root, 1), Object(self.prog, "void *", 0x1234)
        )
        self.assertIdentical(radix_tree_lookup(root, 2), NULL(self.prog, "void *"))
        self.assertIdentical(
            radix_tree_lookup(root, 0x40000000), NULL(self.prog, "void *")
        )
        self.assertIdentical(
            radix_tree_lookup(root, 0x80000000), NULL(self.prog, "void *")
        )
        self.assertIdentical(
            radix_tree_lookup(root, 0x80800000), NULL(self.prog, "void *")
        )
        self.assertIdentical(
            radix_tree_lookup(root, 0x80808000), NULL(self.prog, "void *")
        )
        self.assertIdentical(
            radix_tree_lookup(root, 0x80808080), Object(self.prog, "void *", 0x5678)
        )
        self.assertIdentical(
            radix_tree_lookup(root, 0xFFFFFFFE), NULL(self.prog, "void *")
        )
        self.assertIdentical(
            radix_tree_lookup(root, 0xFFFFFFFF), Object(self.prog, "void *", 0x9ABC)
        )

    def test_radix_tree_for_each_sparse(self):
        root = self.prog["drgn_test_radix_tree_sparse"].address_of_()
        self.assertIdentical(
            list(radix_tree_for_each(root)),
            [
                (1, Object(self.prog, "void *", 0x1234)),
                (0x80808080, Object(self.prog, "void *", 0x5678)),
                (0xFFFFFFFF, Object(self.prog, "void *", 0x9ABC)),
            ],
        )

    def test_radix_tree_lookup_multi_index(self):
        try:
            root = self.prog["drgn_test_radix_tree_multi_order"].address_of_()
        except KeyError:
            # Radix tree multi-order support only exists between Linux kernel
            # commits e61452365372 ("radix_tree: add support for multi-order
            # entries") (in v4.6) and 3a08cd52c37c ("radix tree: Remove
            # multiorder support") (in v4.20), and only if
            # CONFIG_RADIX_TREE_MULTIORDER=y.
            self.skipTest("kernel does not have multi-order radix trees")
        self.assertIdentical(
            radix_tree_lookup(root, 0x80807FFF), NULL(self.prog, "void *")
        )
        for index in range(0x80808000, 0x80808200):
            with self.subTest(index=index):
                self.assertIdentical(
                    radix_tree_lookup(root, index), Object(self.prog, "void *", 0x1234)
                )
        self.assertIdentical(
            radix_tree_lookup(root, 0x80808200), NULL(self.prog, "void *")
        )

    def test_radix_tree_for_each_multi_index(self):
        try:
            root = self.prog["drgn_test_radix_tree_multi_order"].address_of_()
        except KeyError:
            # See test_radix_tree_lookup_multi_index().
            self.skipTest("kernel does not have multi-order radix trees")
        self.assertIdentical(
            list(radix_tree_for_each(root)),
            [(0x80808000, Object(self.prog, "void *", 0x1234))],
        )
tests/linux_kernel/helpers/test_xarray.py (new file, 153 lines)
@@ -0,0 +1,153 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# SPDX-License-Identifier: LGPL-2.1-or-later

import unittest

from drgn import NULL, Object
from drgn.helpers.linux.xarray import (
    xa_for_each,
    xa_is_value,
    xa_is_zero,
    xa_load,
    xa_to_value,
)
from tests.linux_kernel import LinuxKernelTestCase, skip_unless_have_test_kmod


@skip_unless_have_test_kmod
class TestXArray(LinuxKernelTestCase):
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        if not cls.prog["drgn_test_have_xarray"]:
            raise unittest.SkipTest("kernel does not have XArray")

    def test_xa_is_zero(self):
        self.assertTrue(xa_is_zero(self.prog["drgn_test_xa_zero_entry"]))
        self.assertFalse(xa_is_zero(NULL(self.prog, "void *")))

    def test_xa_load_empty(self):
        xa = self.prog["drgn_test_xarray_empty"].address_of_()
        self.assertIdentical(xa_load(xa, 0), NULL(self.prog, "void *"))
        self.assertIdentical(xa_load(xa, 100000), NULL(self.prog, "void *"))

    def test_xa_for_each_empty(self):
        xa = self.prog["drgn_test_xarray_empty"].address_of_()
        self.assertIdentical(list(xa_for_each(xa)), [])

    def test_xa_load_one(self):
        xa = self.prog["drgn_test_xarray_one"].address_of_()
        self.assertIdentical(xa_load(xa, 0), NULL(self.prog, "void *"))
        self.assertIdentical(xa_load(xa, 665), NULL(self.prog, "void *"))
        self.assertIdentical(xa_load(xa, 666), Object(self.prog, "void *", 0xDEADB00))
        self.assertIdentical(xa_load(xa, 667), NULL(self.prog, "void *"))
        self.assertIdentical(xa_load(xa, 100000), NULL(self.prog, "void *"))

    def test_xa_for_each_one(self):
        xa = self.prog["drgn_test_xarray_one"].address_of_()
        self.assertIdentical(
            list(xa_for_each(xa)), [(666, Object(self.prog, "void *", 0xDEADB00))]
        )

    def test_xa_load_one_at_zero(self):
        xa = self.prog["drgn_test_xarray_one_at_zero"].address_of_()
        self.assertIdentical(xa_load(xa, 0), Object(self.prog, "void *", 0x1234))
        self.assertIdentical(xa_load(xa, 1), NULL(self.prog, "void *"))
        self.assertIdentical(xa_load(xa, 100000), NULL(self.prog, "void *"))

    def test_xa_for_each_one_at_zero(self):
        xa = self.prog["drgn_test_xarray_one_at_zero"].address_of_()
        self.assertIdentical(
            list(xa_for_each(xa)), [(0, Object(self.prog, "void *", 0x1234))]
        )

    def test_xa_load_sparse(self):
        xa = self.prog["drgn_test_xarray_sparse"].address_of_()
        self.assertIdentical(xa_load(xa, 0), NULL(self.prog, "void *"))
        self.assertIdentical(xa_load(xa, 1), Object(self.prog, "void *", 0x1234))
        self.assertIdentical(xa_load(xa, 2), NULL(self.prog, "void *"))
        self.assertIdentical(xa_load(xa, 0x40000000), NULL(self.prog, "void *"))
        self.assertIdentical(xa_load(xa, 0x80000000), NULL(self.prog, "void *"))
        self.assertIdentical(xa_load(xa, 0x80800000), NULL(self.prog, "void *"))
        self.assertIdentical(xa_load(xa, 0x80808000), NULL(self.prog, "void *"))
        self.assertIdentical(
            xa_load(xa, 0x80808080), Object(self.prog, "void *", 0x5678)
        )
        self.assertIdentical(xa_load(xa, 0xFFFFFFFE), NULL(self.prog, "void *"))
        self.assertIdentical(
            xa_load(xa, 0xFFFFFFFF), Object(self.prog, "void *", 0x9ABC)
        )

    def test_xa_for_each_sparse(self):
        xa = self.prog["drgn_test_xarray_sparse"].address_of_()
        self.assertIdentical(
            list(xa_for_each(xa)),
            [
                (1, Object(self.prog, "void *", 0x1234)),
                (0x80808080, Object(self.prog, "void *", 0x5678)),
                (0xFFFFFFFF, Object(self.prog, "void *", 0x9ABC)),
            ],
        )

    def test_xa_load_multi_index(self):
        xa = self.prog["drgn_test_xarray_multi_index"].address_of_()
        self.assertIdentical(xa_load(xa, 0x80807FFF), NULL(self.prog, "void *"))
        for index in range(0x80808000, 0x80808200):
            with self.subTest(index=index):
                self.assertIdentical(
                    xa_load(xa, index), Object(self.prog, "void *", 0x1234)
                )
        self.assertIdentical(xa_load(xa, 0x80808200), NULL(self.prog, "void *"))

    def test_xa_for_each_multi_index(self):
        xa = self.prog["drgn_test_xarray_multi_index"].address_of_()
        self.assertIdentical(
            list(xa_for_each(xa)), [(0x80808000, Object(self.prog, "void *", 0x1234))]
        )

    def test_xa_load_zero_entry(self):
        xa = self.prog["drgn_test_xarray_zero_entry"].address_of_()
        self.assertIdentical(xa_load(xa, 0), NULL(self.prog, "void *"))
        self.assertIdentical(xa_load(xa, 666), NULL(self.prog, "void *"))
        self.assertTrue(xa_is_zero(xa_load(xa, 666, advanced=True)))
        self.assertIdentical(xa_load(xa, 2), NULL(self.prog, "void *"))

    def test_xa_for_each_zero_entry(self):
        xa = self.prog["drgn_test_xarray_zero_entry"].address_of_()
        self.assertIdentical(list(xa_for_each(xa)), [])

        entries = list(xa_for_each(xa, advanced=True))
        self.assertEqual(len(entries), 1)
        self.assertEqual(entries[0][0], 666)
        self.assertTrue(xa_is_zero(entries[0][1]))

    def test_xa_load_zero_entry_at_zero(self):
        xa = self.prog["drgn_test_xarray_zero_entry_at_zero"].address_of_()
        self.assertIdentical(xa_load(xa, 0), NULL(self.prog, "void *"))
        self.assertTrue(xa_is_zero(xa_load(xa, 0, advanced=True)))
        self.assertIdentical(xa_load(xa, 1), NULL(self.prog, "void *"))

    def test_xa_for_each_zero_entry_at_zero(self):
        xa = self.prog["drgn_test_xarray_zero_entry_at_zero"].address_of_()
        self.assertIdentical(list(xa_for_each(xa)), [])

        entries = list(xa_for_each(xa, advanced=True))
        self.assertEqual(len(entries), 1)
        self.assertEqual(entries[0][0], 0)
        self.assertTrue(xa_is_zero(entries[0][1]))

    def test_xa_is_value(self):
        self.assertTrue(
            xa_is_value(xa_load(self.prog["drgn_test_xarray_value"].address_of_(), 0))
        )
        self.assertFalse(
            xa_is_value(
                xa_load(self.prog["drgn_test_xarray_one_at_zero"].address_of_(), 0)
            )
        )

    def test_xa_to_value(self):
        self.assertIdentical(
            xa_to_value(xa_load(self.prog["drgn_test_xarray_value"].address_of_(), 0)),
            Object(self.prog, "unsigned long", 1337),
        )
@@ -16,9 +16,17 @@
 #include <linux/llist.h>
 #include <linux/mm.h>
 #include <linux/module.h>
+#include <linux/radix-tree.h>
 #include <linux/rbtree.h>
 #include <linux/rbtree_augmented.h>
 #include <linux/slab.h>
+#include <linux/version.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
+#define HAVE_XARRAY 1
+#include <linux/xarray.h>
+#else
+#define HAVE_XARRAY 0
+#endif
 
 // list
 
@@ -453,6 +461,169 @@ static int drgn_test_stack_trace_init(void)
     return kthread_park(drgn_test_kthread);
 }
 
+// radixtree
+
+RADIX_TREE(drgn_test_radix_tree_empty, GFP_KERNEL);
+RADIX_TREE(drgn_test_radix_tree_one, GFP_KERNEL);
+RADIX_TREE(drgn_test_radix_tree_one_at_zero, GFP_KERNEL);
+RADIX_TREE(drgn_test_radix_tree_sparse, GFP_KERNEL);
+#ifdef CONFIG_RADIX_TREE_MULTIORDER
+RADIX_TREE(drgn_test_radix_tree_multi_order, GFP_KERNEL);
+#endif
+
+static int drgn_test_radix_tree_init(void)
+{
+    int ret;
+
+    ret = radix_tree_insert(&drgn_test_radix_tree_one, 666,
+                            (void *)0xdeadb00);
+    if (ret)
+        return ret;
+
+    ret = radix_tree_insert(&drgn_test_radix_tree_one_at_zero, 0,
+                            (void *)0x1234);
+    if (ret)
+        return ret;
+
+    ret = radix_tree_insert(&drgn_test_radix_tree_sparse, 1,
+                            (void *)0x1234);
+    if (ret)
+        return ret;
+
+    ret = radix_tree_insert(&drgn_test_radix_tree_sparse, 0x80808080,
+                            (void *)0x5678);
+    if (ret)
+        return ret;
+
+    ret = radix_tree_insert(&drgn_test_radix_tree_sparse, 0xffffffff,
+                            (void *)0x9abc);
+    if (ret)
+        return ret;
+
+#ifdef CONFIG_RADIX_TREE_MULTIORDER
+    ret = __radix_tree_insert(&drgn_test_radix_tree_multi_order, 0x80808000,
+                              9, (void *)0x1234);
+    if (ret)
+        return ret;
+#endif
+
+    return 0;
+}
+
+static void drgn_test_radix_tree_destroy(struct radix_tree_root *root)
+{
+    struct radix_tree_iter iter;
+    void __rcu **slot;
+
+    radix_tree_for_each_slot(slot, root, &iter, 0)
+        radix_tree_delete(root, iter.index);
+}
+
+static void drgn_test_radix_tree_exit(void)
+{
+    drgn_test_radix_tree_destroy(&drgn_test_radix_tree_one);
+    drgn_test_radix_tree_destroy(&drgn_test_radix_tree_one_at_zero);
+    drgn_test_radix_tree_destroy(&drgn_test_radix_tree_sparse);
+#ifdef CONFIG_RADIX_TREE_MULTIORDER
+    drgn_test_radix_tree_destroy(&drgn_test_radix_tree_multi_order);
+#endif
+}
+
+// xarray
+const int drgn_test_have_xarray = HAVE_XARRAY;
+#if HAVE_XARRAY
+DEFINE_XARRAY(drgn_test_xarray_empty);
+DEFINE_XARRAY(drgn_test_xarray_one);
+DEFINE_XARRAY(drgn_test_xarray_one_at_zero);
+DEFINE_XARRAY(drgn_test_xarray_sparse);
+DEFINE_XARRAY(drgn_test_xarray_multi_index);
+DEFINE_XARRAY(drgn_test_xarray_zero_entry);
+DEFINE_XARRAY(drgn_test_xarray_zero_entry_at_zero);
+DEFINE_XARRAY(drgn_test_xarray_value);
+void *drgn_test_xa_zero_entry;
+
+static int drgn_test_xa_store_order(struct xarray *xa, unsigned long index,
+                                    unsigned order, void *entry, gfp_t gfp)
+{
+    XA_STATE_ORDER(xas, xa, index, order);
+
+    do {
+        xas_lock(&xas);
+        xas_store(&xas, entry);
+        xas_unlock(&xas);
+    } while (xas_nomem(&xas, gfp));
+    return xas_error(&xas);
+}
+#endif
+
+static int drgn_test_xarray_init(void)
+{
+#if HAVE_XARRAY
+    void *entry;
+    int ret;
+
+    drgn_test_xa_zero_entry = XA_ZERO_ENTRY;
+
+    entry = xa_store(&drgn_test_xarray_one, 666, (void *)0xdeadb00,
+                     GFP_KERNEL);
+    if (xa_is_err(entry))
+        return xa_err(entry);
+
+    entry = xa_store(&drgn_test_xarray_one_at_zero, 0, (void *)0x1234,
+                     GFP_KERNEL);
+    if (xa_is_err(entry))
+        return xa_err(entry);
+
+    entry = xa_store(&drgn_test_xarray_sparse, 1, (void *)0x1234,
+                     GFP_KERNEL);
+    if (xa_is_err(entry))
+        return xa_err(entry);
+    entry = xa_store(&drgn_test_xarray_sparse, 0x80808080, (void *)0x5678,
+                     GFP_KERNEL);
+    if (xa_is_err(entry))
+        return xa_err(entry);
+    entry = xa_store(&drgn_test_xarray_sparse, 0xffffffffUL, (void *)0x9abc,
+                     GFP_KERNEL);
+    if (xa_is_err(entry))
+        return xa_err(entry);
+
+    ret = drgn_test_xa_store_order(&drgn_test_xarray_multi_index,
+                                   0x80808000, 9, (void *)0x1234,
+                                   GFP_KERNEL);
+    if (ret)
+        return ret;
+
+    ret = xa_reserve(&drgn_test_xarray_zero_entry, 666, GFP_KERNEL);
+    if (ret)
+        return ret;
+
+    ret = xa_reserve(&drgn_test_xarray_zero_entry_at_zero, 0, GFP_KERNEL);
+    if (ret)
+        return ret;
+
+    entry = xa_store(&drgn_test_xarray_value, 0, xa_mk_value(1337),
+                     GFP_KERNEL);
+    if (xa_is_err(entry))
+        return xa_err(entry);
+
+#endif
+
+    return 0;
+}
+
+static void drgn_test_xarray_exit(void)
+{
+#if HAVE_XARRAY
+    xa_destroy(&drgn_test_xarray_one);
+    xa_destroy(&drgn_test_xarray_one_at_zero);
+    xa_destroy(&drgn_test_xarray_sparse);
+    xa_destroy(&drgn_test_xarray_multi_index);
+    xa_destroy(&drgn_test_xarray_zero_entry);
+    xa_destroy(&drgn_test_xarray_zero_entry_at_zero);
+    xa_destroy(&drgn_test_xarray_value);
+#endif
+}
+
 // Dummy function symbol.
 int drgn_test_function(int x)
 {
@@ -465,6 +636,8 @@ static void drgn_test_exit(void)
     drgn_test_percpu_exit();
     drgn_test_mm_exit();
     drgn_test_stack_trace_exit();
+    drgn_test_radix_tree_exit();
+    drgn_test_xarray_exit();
 }
 
 static int __init drgn_test_init(void)
@@ -484,6 +657,12 @@ static int __init drgn_test_init(void)
     if (ret)
        goto out;
     ret = drgn_test_stack_trace_init();
+    if (ret)
+        goto out;
+    ret = drgn_test_radix_tree_init();
+    if (ret)
+        goto out;
+    ret = drgn_test_xarray_init();
 out:
     if (ret)
         drgn_test_exit();