diff --git a/rclpy/CMakeLists.txt b/rclpy/CMakeLists.txt
index a9966c040..b8192e46a 100644
--- a/rclpy/CMakeLists.txt
+++ b/rclpy/CMakeLists.txt
@@ -156,6 +156,11 @@ if(BUILD_TESTING)
if(NOT _typesupport_impls STREQUAL "")
# Run each test in its own pytest invocation to isolate any global state in rclpy
set(_rclpy_pytest_tests
+ test/benchmark/test_benchmark_actions.py
+ test/benchmark/test_benchmark_client_service.py
+ test/benchmark/test_benchmark_guard_condition.py
+ test/benchmark/test_benchmark_pub_sub.py
+ test/benchmark/test_benchmark_timer.py
test/test_action_client.py
test/test_action_graph.py
test/test_action_server.py
@@ -198,8 +203,19 @@ if(BUILD_TESTING)
)
foreach(_test_path ${_rclpy_pytest_tests})
+      # Always reset per iteration so a stale value can never leak into
+      # ament_add_pytest_test (e.g. when AMENT_RUN_PERFORMANCE_TESTS is set).
+      set(_SKIP_TEST "")
+      if(NOT AMENT_RUN_PERFORMANCE_TESTS)
+        get_filename_component(_test_dir ${_test_path} DIRECTORY)
+        get_filename_component(_test_dir ${_test_dir} NAME)
+        if(_test_dir STREQUAL "benchmark")
+          # Skip benchmark tests unless performance tests were requested
+          set(_SKIP_TEST "SKIP_TEST")
+        endif()
+      endif()
get_filename_component(_test_name ${_test_path} NAME_WE)
ament_add_pytest_test(${_test_name} ${_test_path}
+ ${_SKIP_TEST}
APPEND_ENV AMENT_PREFIX_PATH=${ament_index_build_path}
PYTHONPATH=${CMAKE_CURRENT_BINARY_DIR}
TIMEOUT 120
diff --git a/rclpy/package.xml b/rclpy/package.xml
index d9b661d29..22332a19e 100644
--- a/rclpy/package.xml
+++ b/rclpy/package.xml
@@ -44,6 +44,7 @@
ament_lint_auto
ament_lint_common
python3-pytest
+ python3-pytest-benchmark
rosidl_generator_py
test_msgs
diff --git a/rclpy/test/benchmark/test_benchmark_actions.py b/rclpy/test/benchmark/test_benchmark_actions.py
new file mode 100644
index 000000000..53df54077
--- /dev/null
+++ b/rclpy/test/benchmark/test_benchmark_actions.py
@@ -0,0 +1,76 @@
+# Copyright 2022 Open Source Robotics Foundation, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from action_msgs.msg import GoalStatus
+import rclpy
+from rclpy.action import ActionClient, ActionServer
+from rclpy.executors import SingleThreadedExecutor
+from rclpy.qos import QoSProfile, ReliabilityPolicy
+from test_msgs.action import Fibonacci
+
+
+ONE_HUNDRED = 100
+
+
+def test_one_hundred_goals(benchmark):
+    context = rclpy.context.Context()
+    rclpy.init(context=context)
+    try:
+        node = rclpy.create_node('benchmark_actions', context=context)
+        # Build a private profile; mutating the shared module-level
+        qos = QoSProfile(depth=10, reliability=ReliabilityPolicy.RELIABLE)
+
+ action_client = ActionClient(
+ node,
+ Fibonacci,
+ 'benchmark',
+ goal_service_qos_profile=qos,
+ result_service_qos_profile=qos)
+
+ def execute_cb(goal_handle):
+ goal_handle.succeed()
+ return Fibonacci.Result()
+
+ action_server = ActionServer(
+ node,
+ Fibonacci,
+ 'benchmark',
+ execute_cb,
+ goal_service_qos_profile=qos,
+ result_service_qos_profile=qos)
+
+ executor = SingleThreadedExecutor(context=context)
+ executor.add_node(node)
+
+ # Wait for client/server to discover each other
+ assert action_client.wait_for_server(timeout_sec=5.0)
+
+ def bm():
+ for _ in range(ONE_HUNDRED):
+ goal_fut = action_client.send_goal_async(Fibonacci.Goal())
+ executor.spin_until_future_complete(goal_fut)
+ client_goal_handle = goal_fut.result()
+ assert client_goal_handle.accepted
+ result_fut = client_goal_handle.get_result_async()
+ executor.spin_until_future_complete(result_fut)
+ assert GoalStatus.STATUS_SUCCEEDED == result_fut.result().status
+
+ benchmark(bm)
+
+ executor.shutdown()
+ action_client.destroy()
+ action_server.destroy()
+ node.destroy_node()
+ finally:
+ rclpy.shutdown(context=context)
diff --git a/rclpy/test/benchmark/test_benchmark_client_service.py b/rclpy/test/benchmark/test_benchmark_client_service.py
new file mode 100644
index 000000000..db88673da
--- /dev/null
+++ b/rclpy/test/benchmark/test_benchmark_client_service.py
@@ -0,0 +1,57 @@
+# Copyright 2022 Open Source Robotics Foundation, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import rclpy
+from rclpy.executors import SingleThreadedExecutor
+from rclpy.qos import QoSProfile, ReliabilityPolicy
+from test_msgs.srv import Empty as EmptySrv
+
+
+ONE_THOUSAND = 1000
+
+
+def test_one_thousand_service_calls(benchmark):
+    context = rclpy.context.Context()
+    rclpy.init(context=context)
+    try:
+        node = rclpy.create_node('benchmark_client_service', context=context)
+        # Build a private profile; mutating the shared module-level
+        qos = QoSProfile(depth=10, reliability=ReliabilityPolicy.RELIABLE)
+ client = node.create_client(EmptySrv, 'empty', qos_profile=qos)
+
+ def cb(_, response):
+ return response
+
+ service = node.create_service(EmptySrv, 'empty', cb, qos_profile=qos)
+
+ executor = SingleThreadedExecutor(context=context)
+ executor.add_node(node)
+
+ # Wait for client/service to discover each other
+ assert client.wait_for_service(timeout_sec=5.0)
+
+ def bm():
+ for _ in range(ONE_THOUSAND):
+ fut = client.call_async(EmptySrv.Request())
+ executor.spin_until_future_complete(fut)
+ assert fut.result() # Raises if there was an error
+
+ benchmark(bm)
+
+ executor.shutdown()
+ node.destroy_client(client)
+ node.destroy_service(service)
+ node.destroy_node()
+ finally:
+ rclpy.shutdown(context=context)
diff --git a/rclpy/test/benchmark/test_benchmark_guard_condition.py b/rclpy/test/benchmark/test_benchmark_guard_condition.py
new file mode 100644
index 000000000..05faabd12
--- /dev/null
+++ b/rclpy/test/benchmark/test_benchmark_guard_condition.py
@@ -0,0 +1,86 @@
+# Copyright 2022 Open Source Robotics Foundation, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import rclpy
+from rclpy.executors import SingleThreadedExecutor
+
+
+ONE_THOUSAND = 1000
+
+
+def test_one_thousand_guard_callbacks(benchmark):
+ context = rclpy.context.Context()
+ rclpy.init(context=context)
+ try:
+ node = rclpy.create_node('benchmark_many_gc_calls', context=context)
+ num_calls = 0
+
+ def cb():
+ nonlocal num_calls
+ num_calls += 1
+
+ gc = node.create_guard_condition(cb)
+
+ executor = SingleThreadedExecutor(context=context)
+ executor.add_node(node)
+
+        def bm():
+            nonlocal num_calls
+            num_calls = 0  # reset between benchmark rounds
+            while num_calls < ONE_THOUSAND:
+                gc.trigger()
+                executor.spin_once(timeout_sec=0)
+            return num_calls == ONE_THOUSAND
+
+        assert benchmark(bm)
+
+ executor.shutdown()
+ node.destroy_guard_condition(gc)
+ node.destroy_node()
+ finally:
+ rclpy.shutdown(context=context)
+
+
+def test_one_thousand_guard_coroutine_callbacks(benchmark):
+ context = rclpy.context.Context()
+ rclpy.init(context=context)
+ try:
+ node = rclpy.create_node('benchmark_many_gc_calls', context=context)
+ num_calls = 0
+
+ async def coro():
+ nonlocal num_calls
+ num_calls += 1
+
+ gc = node.create_guard_condition(coro)
+
+ executor = SingleThreadedExecutor(context=context)
+ executor.add_node(node)
+
+ def bm():
+ nonlocal num_calls
+ # Reset for each benchmark run
+ num_calls = 0
+ while num_calls < ONE_THOUSAND:
+ gc.trigger()
+ executor.spin_once(timeout_sec=0)
+ return num_calls == ONE_THOUSAND
+
+ assert benchmark(bm)
+
+ executor.shutdown()
+ node.destroy_guard_condition(gc)
+ node.destroy_node()
+ finally:
+ rclpy.shutdown(context=context)
diff --git a/rclpy/test/benchmark/test_benchmark_pub_sub.py b/rclpy/test/benchmark/test_benchmark_pub_sub.py
new file mode 100644
index 000000000..e3cf94ae7
--- /dev/null
+++ b/rclpy/test/benchmark/test_benchmark_pub_sub.py
@@ -0,0 +1,64 @@
+# Copyright 2022 Open Source Robotics Foundation, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import rclpy
+from rclpy.executors import SingleThreadedExecutor
+from rclpy.qos import QoSProfile, ReliabilityPolicy
+from test_msgs.msg import Empty as EmptyMsg
+
+
+ONE_THOUSAND = 1000
+
+
+def test_one_thousand_messages(benchmark):
+ context = rclpy.context.Context()
+ rclpy.init(context=context)
+ try:
+ node = rclpy.create_node('benchmark_pub_sub', context=context)
+ qos = QoSProfile(depth=1, reliability=ReliabilityPolicy.RELIABLE)
+ pub = node.create_publisher(EmptyMsg, 'empty', qos)
+ num_calls = 0
+
+ def cb(_):
+ nonlocal num_calls
+ num_calls += 1
+ # Send next message
+ pub.publish(EmptyMsg())
+
+ sub = node.create_subscription(EmptyMsg, 'empty', cb, qos)
+
+ executor = SingleThreadedExecutor(context=context)
+ executor.add_node(node)
+
+ # Wait for pub/sub to discover each other
+ while not pub.get_subscription_count():
+ executor.spin_once(timeout_sec=0.01)
+
+ def bm():
+ nonlocal num_calls
+ # Reset for each benchmark run
+ num_calls = 0
+ # Prime the loop with a message
+ pub.publish(EmptyMsg())
+ while num_calls < ONE_THOUSAND:
+ executor.spin_once(timeout_sec=0)
+
+ benchmark(bm)
+
+ executor.shutdown()
+ node.destroy_publisher(pub)
+ node.destroy_subscription(sub)
+ node.destroy_node()
+ finally:
+ rclpy.shutdown(context=context)
diff --git a/rclpy/test/benchmark/test_benchmark_timer.py b/rclpy/test/benchmark/test_benchmark_timer.py
new file mode 100644
index 000000000..9d8bbe5e2
--- /dev/null
+++ b/rclpy/test/benchmark/test_benchmark_timer.py
@@ -0,0 +1,83 @@
+# Copyright 2022 Open Source Robotics Foundation, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import rclpy
+from rclpy.executors import SingleThreadedExecutor
+
+
+ONE_THOUSAND = 1000
+
+
+def test_one_thousand_timer_coroutine_callbacks(benchmark):
+ context = rclpy.context.Context()
+ rclpy.init(context=context)
+ try:
+ node = rclpy.create_node('benchmark_many_timer_calls', context=context)
+ num_calls = 0
+
+ async def timer_coro():
+ nonlocal num_calls
+ num_calls += 1
+
+ timer = node.create_timer(0, timer_coro)
+
+ executor = SingleThreadedExecutor(context=context)
+ executor.add_node(node)
+
+ def bm():
+ nonlocal num_calls
+ # Reset for each benchmark run
+ num_calls = 0
+ while num_calls < ONE_THOUSAND:
+ executor.spin_once(timeout_sec=0)
+
+ benchmark(bm)
+
+ executor.shutdown()
+ node.destroy_timer(timer)
+ node.destroy_node()
+ finally:
+ rclpy.shutdown(context=context)
+
+
+def test_one_thousand_timer_callbacks(benchmark):
+ context = rclpy.context.Context()
+ rclpy.init(context=context)
+ try:
+ node = rclpy.create_node('benchmark_many_timer_calls', context=context)
+ num_calls = 0
+
+ def timer_cb():
+ nonlocal num_calls
+ num_calls += 1
+
+ timer = node.create_timer(0, timer_cb)
+
+ executor = SingleThreadedExecutor(context=context)
+ executor.add_node(node)
+
+ def bm():
+ nonlocal num_calls
+ # Reset for each benchmark run
+ num_calls = 0
+ while num_calls < ONE_THOUSAND:
+ executor.spin_once(timeout_sec=0)
+
+ benchmark(bm)
+
+ executor.shutdown()
+ node.destroy_timer(timer)
+ node.destroy_node()
+ finally:
+ rclpy.shutdown(context=context)